// Copyright (C) 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file is automatically generated by gen_amalgamated. Do not edit.

// gen_amalgamated: predefined macros
#if !defined(GOOGLE_PROTOBUF_NO_RTTI)
#define GOOGLE_PROTOBUF_NO_RTTI
#endif
#if !defined(PERFETTO_IMPLEMENTATION)
#define PERFETTO_IMPLEMENTATION
#endif
#if !defined(GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER)
#define GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
#endif
#include "perfetto.h"
// gen_amalgamated begin source: src/base/file_utils.cc
// gen_amalgamated begin header: include/perfetto/ext/base/file_utils.h
// gen_amalgamated begin header: include/perfetto/ext/base/utils.h
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef INCLUDE_PERFETTO_EXT_BASE_UTILS_H_
#define INCLUDE_PERFETTO_EXT_BASE_UTILS_H_

// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
#include <sys/types.h>
#endif

#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
#include <unistd.h>  // For getpagesize().
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
#include <mach/vm_page_size.h>
#endif

#include <atomic>

#define PERFETTO_EINTR(x)                                   \
  ([&] {                                                    \
    decltype(x) eintr_wrapper_result;                       \
    do {                                                    \
      eintr_wrapper_result = (x);                           \
    } while (eintr_wrapper_result == -1 && errno == EINTR); \
    return eintr_wrapper_result;                            \
  }())

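// Illustrative usage sketch (editorial addition, not part of the upstream
// Perfetto sources): PERFETTO_EINTR retries a syscall that fails with EINTR
// while preserving the wrapped expression's type. |fd| and |buf| are
// hypothetical:
//
//   ssize_t n = PERFETTO_EINTR(read(fd, buf, sizeof(buf)));
//   if (n < 0) {
//     // Real error: errno is something other than EINTR.
//   }
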
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
// TODO(brucedawson) - create a ::perfetto::base::IOSize to replace this.
#if defined(_WIN64)
using ssize_t = __int64;
#else
using ssize_t = long;
#endif
#endif

namespace perfetto {
namespace base {

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
constexpr uid_t kInvalidUid = static_cast<uid_t>(-1);
constexpr pid_t kInvalidPid = static_cast<pid_t>(-1);
#endif

// Do not add new usages of kPageSize, consider using GetSysPageSize() below.
// TODO(primiano): over time the semantic of kPageSize became too ambiguous.
// Strictly speaking, this constant is incorrect on some new devices where the
// page size can be 16K (e.g., crbug.com/1116576). Unfortunately too much code
// ended up depending on kPageSize for purposes that are not strictly related
// with the kernel's mm subsystem.
constexpr size_t kPageSize = 4096;

// Returns the system's page size. Use this when dealing with mmap, madvise and
// similar mm-related syscalls.
inline uint32_t GetSysPageSize() {
  ignore_result(kPageSize);  // Just to keep the amalgamated build happy.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  static std::atomic<uint32_t> page_size{0};
  // This function might be called in hot paths. Avoid calling getpagesize()
  // all the time: in many implementations getpagesize() calls sysconf(),
  // which is not cheap.
  uint32_t cached_value = page_size.load(std::memory_order_relaxed);
  if (PERFETTO_UNLIKELY(cached_value == 0)) {
    cached_value = static_cast<uint32_t>(getpagesize());
    page_size.store(cached_value, std::memory_order_relaxed);
  }
  return cached_value;
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  return static_cast<uint32_t>(vm_page_size);
#else
  return 4096;
#endif
}

template <typename T>
constexpr size_t ArraySize(const T& array) {
  return sizeof(array) / sizeof(array[0]);
}

// Function object which invokes 'free' on its parameter, which must be
// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
//
// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
//     static_cast<int*>(malloc(sizeof(int))));
struct FreeDeleter {
  inline void operator()(void* ptr) const { free(ptr); }
};

template <typename T>
constexpr T AssumeLittleEndian(T value) {
  static_assert(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__,
                "Unimplemented on big-endian archs");
  return value;
}

// Round up |size| to a multiple of |alignment| (must be a power of two).
template <size_t alignment>
constexpr size_t AlignUp(size_t size) {
  static_assert((alignment & (alignment - 1)) == 0, "alignment must be a pow2");
  return (size + alignment - 1) & ~(alignment - 1);
}

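// Illustrative compile-time examples (editorial addition, not part of the
// upstream Perfetto sources) for the helpers above. ArraySize() counts the
// elements of a C array and AlignUp<>() rounds up to a power-of-two multiple:
//
//   static const char kTable[8] = {};
//   static_assert(ArraySize(kTable) == 8, "");
//   static_assert(AlignUp<16>(30) == 32, "");
//   static_assert(AlignUp<4096>(1) == 4096, "");
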
inline bool IsAgain(int err) {
  return err == EAGAIN || err == EWOULDBLOCK;
}

}  // namespace base
}  // namespace perfetto

#endif  // INCLUDE_PERFETTO_EXT_BASE_UTILS_H_
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef INCLUDE_PERFETTO_EXT_BASE_FILE_UTILS_H_
#define INCLUDE_PERFETTO_EXT_BASE_FILE_UTILS_H_

#include <stddef.h>

#include <string>

// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"

namespace perfetto {
namespace base {

bool ReadFileDescriptor(int fd, std::string* out);
bool ReadFileStream(FILE* f, std::string* out);
bool ReadFile(const std::string& path, std::string* out);

// Call write until all data is written or an error is detected.
//
// man 2 write:
//   If a write() is interrupted by a signal handler before any bytes are
//   written, then the call fails with the error EINTR; if it is
//   interrupted after at least one byte has been written, the call
//   succeeds, and returns the number of bytes written.
ssize_t WriteAll(int fd, const void* buf, size_t count);

bool FlushFile(int fd);

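// Illustrative usage sketch (editorial addition, not part of the upstream
// Perfetto sources) for the helpers declared above. ReadFile() appends to the
// output string without clearing it; WriteAll() keeps calling write() until
// every byte is written or a real error occurs. The paths are hypothetical:
//
//   std::string contents;
//   if (base::ReadFile("/proc/self/status", &contents)) {
//     base::ScopedFile out =
//         base::OpenFile("/tmp/copy", O_WRONLY | O_CREAT | O_TRUNC, 0600);
//     if (out)
//       base::WriteAll(*out, contents.data(), contents.size());
//   }
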
}  // namespace base
}  // namespace perfetto

#endif  // INCLUDE_PERFETTO_EXT_BASE_FILE_UTILS_H_
// gen_amalgamated begin header: include/perfetto/ext/base/scoped_file.h
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef INCLUDE_PERFETTO_EXT_BASE_SCOPED_FILE_H_
#define INCLUDE_PERFETTO_EXT_BASE_SCOPED_FILE_H_

// gen_amalgamated expanded: #include "perfetto/base/build_config.h"

#include <fcntl.h>
#include <stdio.h>

#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
    !PERFETTO_BUILDFLAG(PERFETTO_COMPILER_GCC)
#include <corecrt_io.h>
typedef int mode_t;
#else
#include <dirent.h>
#include <unistd.h>
#endif

#include <string>

// gen_amalgamated expanded: #include "perfetto/base/logging.h"

namespace perfetto {
namespace base {

constexpr mode_t kInvalidMode = static_cast<mode_t>(-1);

// RAII classes for auto-releasing fds and dirs.
template <typename T,
          int (*CloseFunction)(T),
          T InvalidValue,
          bool CheckClose = true>
class ScopedResource {
 public:
  explicit ScopedResource(T t = InvalidValue) : t_(t) {}
  ScopedResource(ScopedResource&& other) noexcept {
    t_ = other.t_;
    other.t_ = InvalidValue;
  }
  ScopedResource& operator=(ScopedResource&& other) {
    reset(other.t_);
    other.t_ = InvalidValue;
    return *this;
  }
  T get() const { return t_; }
  T operator*() const { return t_; }
  explicit operator bool() const { return t_ != InvalidValue; }
  void reset(T r = InvalidValue) {
    if (t_ != InvalidValue) {
      int res = CloseFunction(t_);
      if (CheckClose)
        PERFETTO_CHECK(res == 0);
    }
    t_ = r;
  }
  T release() {
    T t = t_;
    t_ = InvalidValue;
    return t;
  }
  ~ScopedResource() { reset(InvalidValue); }

 private:
  ScopedResource(const ScopedResource&) = delete;
  ScopedResource& operator=(const ScopedResource&) = delete;

  T t_;
};

using ScopedFile = ScopedResource<int, close, -1>;
inline static ScopedFile OpenFile(const std::string& path,
                                  int flags,
                                  mode_t mode = kInvalidMode) {
  PERFETTO_DCHECK((flags & O_CREAT) == 0 || mode != kInvalidMode);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // Always use O_BINARY on Windows, to avoid silly EOL translations.
  ScopedFile fd(open(path.c_str(), flags | O_BINARY, mode));
#else
  // Always open a ScopedFile with O_CLOEXEC so we can safely fork and exec.
  ScopedFile fd(open(path.c_str(), flags | O_CLOEXEC, mode));
#endif
  return fd;
}
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
using ScopedDir = ScopedResource<DIR*, closedir, nullptr>;
#endif

using ScopedFstream = ScopedResource<FILE*, fclose, nullptr>;

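// Illustrative usage sketch (editorial addition, not part of the upstream
// Perfetto sources) for the RAII wrappers above. The descriptor is closed
// automatically when |fd| goes out of scope; the path is hypothetical:
//
//   {
//     base::ScopedFile fd = base::OpenFile("/tmp/example", O_RDONLY);
//     if (!fd)
//       return;  // open() failed.
//     // ... use fd.get() / *fd with read(), fstat(), etc. ...
//   }  // close(*fd) runs here and PERFETTO_CHECKs that it succeeded.
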
}  // namespace base
}  // namespace perfetto

#endif  // INCLUDE_PERFETTO_EXT_BASE_SCOPED_FILE_H_
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/stat.h>

// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || \
    PERFETTO_BUILDFLAG(PERFETTO_COMPILER_GCC)
#include <unistd.h>
#else
#include <corecrt_io.h>
#include <io.h>
#endif

namespace perfetto {
namespace base {
namespace {
constexpr size_t kBufSize = 2048;
}

bool ReadFileDescriptor(int fd, std::string* out) {
  // Do not override existing data in string.
  size_t i = out->size();

  struct stat buf {};
  if (fstat(fd, &buf) != -1) {
    if (buf.st_size > 0)
      out->resize(i + static_cast<size_t>(buf.st_size));
  }

  ssize_t bytes_read;
  for (;;) {
    if (out->size() < i + kBufSize)
      out->resize(out->size() + kBufSize);

    bytes_read = PERFETTO_EINTR(read(fd, &((*out)[i]), kBufSize));
    if (bytes_read > 0) {
      i += static_cast<size_t>(bytes_read);
    } else {
      out->resize(i);
      return bytes_read == 0;
    }
  }
}

bool ReadFileStream(FILE* f, std::string* out) {
  return ReadFileDescriptor(fileno(f), out);
}

bool ReadFile(const std::string& path, std::string* out) {
  base::ScopedFile fd = base::OpenFile(path, O_RDONLY);
  if (!fd)
    return false;

  return ReadFileDescriptor(*fd, out);
}

ssize_t WriteAll(int fd, const void* buf, size_t count) {
  size_t written = 0;
  while (written < count) {
    ssize_t wr = PERFETTO_EINTR(
        write(fd, static_cast<const char*>(buf) + written, count - written));
    if (wr == 0)
      break;
    if (wr < 0)
      return wr;
    written += static_cast<size_t>(wr);
  }
  return static_cast<ssize_t>(written);
}

bool FlushFile(int fd) {
  PERFETTO_DCHECK(fd != 0);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  return !PERFETTO_EINTR(fdatasync(fd));
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  return !PERFETTO_EINTR(_commit(fd));
#else
  return !PERFETTO_EINTR(fsync(fd));
#endif
}

}  // namespace base
}  // namespace perfetto
// gen_amalgamated begin source: src/base/logging.cc
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// gen_amalgamated expanded: #include "perfetto/base/logging.h"

#include <stdarg.h>
#include <stdio.h>

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
#include <unistd.h>  // For isatty()
#endif

#include <memory>

// gen_amalgamated expanded: #include "perfetto/base/time.h"

namespace perfetto {
namespace base {

namespace {
const char kReset[] = "\x1b[0m";
const char kDefault[] = "\x1b[39m";
const char kDim[] = "\x1b[2m";
const char kRed[] = "\x1b[31m";
const char kBoldGreen[] = "\x1b[1m\x1b[32m";
const char kLightGray[] = "\x1b[90m";

}  // namespace

void LogMessage(LogLev level,
                const char* fname,
                int line,
                const char* fmt,
                ...) {
  char stack_buf[512];
  std::unique_ptr<char[]> large_buf;
  char* log_msg = &stack_buf[0];

  // By default use a stack allocated buffer because most log messages are
  // quite short. In rare cases they can be larger (e.g. --help). In those
  // cases we pay the cost of allocating the buffer on the heap.
  for (size_t max_len = sizeof(stack_buf);;) {
    va_list args;
    va_start(args, fmt);
    int res = vsnprintf(log_msg, max_len, fmt, args);
    va_end(args);

    // If for any reason the print fails, overwrite the message but still print
    // it. The code below will attach the filename and line, which is still
    // useful.
    if (res < 0) {
      strncpy(log_msg, "[printf format error]", max_len);
      break;
    }

    // If res == max_len, vsnprintf saturated the input buffer. Retry with a
    // larger buffer in that case (within reasonable limits).
    if (res < static_cast<int>(max_len) || max_len >= 128 * 1024)
      break;
    max_len *= 4;
    large_buf.reset(new char[max_len]);
    log_msg = &large_buf[0];
  }

  const char* color = kDefault;
  switch (level) {
    case kLogDebug:
      color = kDim;
      break;
    case kLogInfo:
      color = kDefault;
      break;
    case kLogImportant:
      color = kBoldGreen;
      break;
    case kLogError:
      color = kRed;
      break;
  }

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_WASM)
  static const bool use_colors = isatty(STDERR_FILENO);
#else
  static const bool use_colors = false;
#endif

  // Formats file.cc:line as a space-padded fixed width string. If the file
  // name |fname| is too long, truncate it on the left-hand side.
  char line_str[10];
  size_t line_len =
      static_cast<size_t>(snprintf(line_str, sizeof(line_str), "%d", line));

  // 24 will be the width of the file.cc:line column in the log event.
  char file_and_line[24];
  size_t fname_len = strlen(fname);
  size_t fname_max = sizeof(file_and_line) - line_len - 2;  // 2 = ':' + '\0'.
  size_t fname_offset = fname_len <= fname_max ? 0 : fname_len - fname_max;
  int len = snprintf(file_and_line, sizeof(file_and_line), "%s:%s",
                     fname + fname_offset, line_str);
  memset(&file_and_line[len], ' ', sizeof(file_and_line) - size_t(len));
  file_and_line[sizeof(file_and_line) - 1] = '\0';

#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // Logcat already has timestamping, don't re-emit it.
  __android_log_print(ANDROID_LOG_DEBUG + level, "perfetto", "%s %s",
                      file_and_line, log_msg);
#endif

  // When printing on stderr, print also the timestamp. We don't really care
  // about the actual time. We just need some reference clock that can be used
  // to correlate events across different processes (e.g. traced and
  // traced_probes). The wall time % 1000 is good enough.
  char timestamp[32];
  uint32_t t_ms = static_cast<uint32_t>(GetWallTimeMs().count());
  uint32_t t_sec = t_ms / 1000;
  t_ms -= t_sec * 1000;
  t_sec = t_sec % 1000;
  snprintf(timestamp, sizeof(timestamp), "[%03u.%03u] ", t_sec, t_ms);

  if (use_colors) {
    fprintf(stderr, "%s%s%s%s %s%s%s\n", kLightGray, timestamp, file_and_line,
            kReset, color, log_msg, kReset);
  } else {
    fprintf(stderr, "%s%s %s\n", timestamp, file_and_line, log_msg);
  }
}

}  // namespace base
}  // namespace perfetto
// gen_amalgamated begin source: src/base/metatrace.cc
// gen_amalgamated begin header: include/perfetto/ext/base/metatrace.h
// gen_amalgamated begin header: include/perfetto/ext/base/metatrace_events.h
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef INCLUDE_PERFETTO_EXT_BASE_METATRACE_EVENTS_H_
#define INCLUDE_PERFETTO_EXT_BASE_METATRACE_EVENTS_H_

#include <stdint.h>

namespace perfetto {
namespace metatrace {

enum Tags : uint32_t {
  TAG_NONE = 0,
  TAG_ANY = uint32_t(-1),
  TAG_FTRACE = 1 << 0,
  TAG_PROC_POLLERS = 1 << 1,
  TAG_TRACE_WRITER = 1 << 2,
  TAG_TRACE_SERVICE = 1 << 3,
  TAG_PRODUCER = 1 << 4,
};

// The macros below generate matching enums and arrays of string literals.
// This is to avoid maintaining string maps manually.

// clang-format off

// DO NOT remove or reshuffle items in this list, only append. The IDs of these
// events are an ABI; the trace processor relies on them to open old traces.
#define PERFETTO_METATRACE_EVENTS(F) \
  F(EVENT_ZERO_UNUSED), \
  F(FTRACE_CPU_READER_READ), /*unused*/ \
  F(FTRACE_DRAIN_CPUS), /*unused*/ \
  F(FTRACE_UNBLOCK_READERS), /*unused*/ \
  F(FTRACE_CPU_READ_NONBLOCK), /*unused*/ \
  F(FTRACE_CPU_READ_BLOCK), /*unused*/ \
  F(FTRACE_CPU_SPLICE_NONBLOCK), /*unused*/ \
  F(FTRACE_CPU_SPLICE_BLOCK), /*unused*/ \
  F(FTRACE_CPU_WAIT_CMD), /*unused*/ \
  F(FTRACE_CPU_RUN_CYCLE), /*unused*/ \
  F(FTRACE_CPU_FLUSH), \
  F(FTRACE_CPU_DRAIN), /*unused*/ \
  F(READ_SYS_STATS), \
  F(PS_WRITE_ALL_PROCESSES), \
  F(PS_ON_PIDS), \
  F(PS_ON_RENAME_PIDS), \
  F(PS_WRITE_ALL_PROCESS_STATS), \
  F(TRACE_WRITER_COMMIT_STARTUP_WRITER_BATCH), \
  F(FTRACE_READ_TICK), \
  F(FTRACE_CPU_READ_CYCLE), \
  F(FTRACE_CPU_READ_BATCH), \
  F(KALLSYMS_PARSE), \
  F(PROFILER_READ_TICK), \
  F(PROFILER_READ_CPU), \
  F(PROFILER_UNWIND_TICK), \
  F(PROFILER_UNWIND_SAMPLE), \
  F(PROFILER_UNWIND_INITIAL_ATTEMPT), \
  F(PROFILER_UNWIND_ATTEMPT), \
  F(PROFILER_MAPS_PARSE), \
  F(PROFILER_MAPS_REPARSE), \
  F(PROFILER_UNWIND_CACHE_CLEAR)

// Append only, see above.
//
// Values that aren't used as counters:
// * TRACE_SERVICE_COMMIT_DATA is a bit-packed representation of an event, see
//   tracing_service_impl.cc for the format.
// * PROFILER_UNWIND_CURRENT_PID represents the PID that is being unwound.
//
#define PERFETTO_METATRACE_COUNTERS(F) \
  F(COUNTER_ZERO_UNUSED), \
  F(FTRACE_PAGES_DRAINED), \
  F(PS_PIDS_SCANNED), \
  F(TRACE_SERVICE_COMMIT_DATA), \
  F(PROFILER_UNWIND_QUEUE_SZ), \
  F(PROFILER_UNWIND_CURRENT_PID)

// clang-format on

#define PERFETTO_METATRACE_IDENTITY(name) name
#define PERFETTO_METATRACE_TOSTRING(name) #name

enum Events : uint16_t {
  PERFETTO_METATRACE_EVENTS(PERFETTO_METATRACE_IDENTITY),
  EVENTS_MAX
};
constexpr char const* kEventNames[] = {
    PERFETTO_METATRACE_EVENTS(PERFETTO_METATRACE_TOSTRING)};

enum Counters : uint16_t {
  PERFETTO_METATRACE_COUNTERS(PERFETTO_METATRACE_IDENTITY),
  COUNTERS_MAX
};
constexpr char const* kCounterNames[] = {
    PERFETTO_METATRACE_COUNTERS(PERFETTO_METATRACE_TOSTRING)};

inline void SuppressUnusedVarsInAmalgamatedBuild() {
  (void)kCounterNames;
  (void)kEventNames;
}

}  // namespace metatrace
}  // namespace perfetto

#endif  // INCLUDE_PERFETTO_EXT_BASE_METATRACE_EVENTS_H_
// gen_amalgamated begin header: include/perfetto/ext/base/thread_annotations.h
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_ANNOTATIONS_H_
#define INCLUDE_PERFETTO_EXT_BASE_THREAD_ANNOTATIONS_H_

// gen_amalgamated expanded: #include "perfetto/base/build_config.h"

// Windows TSAN doesn't currently support these annotations.
#if defined(THREAD_SANITIZER) && !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
extern "C" {
void AnnotateBenignRaceSized(const char* file,
                             int line,
                             unsigned long address,
                             unsigned long size,
                             const char* description);
}

#define PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(pointer, size, description)   \
  AnnotateBenignRaceSized(__FILE__, __LINE__,                             \
                          reinterpret_cast<unsigned long>(pointer), size, \
                          description);
#else  // defined(THREAD_SANITIZER)
#define PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(pointer, size, description)
#endif  // defined(THREAD_SANITIZER)

#endif  // INCLUDE_PERFETTO_EXT_BASE_THREAD_ANNOTATIONS_H_
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef INCLUDE_PERFETTO_EXT_BASE_METATRACE_H_
#define INCLUDE_PERFETTO_EXT_BASE_METATRACE_H_

#include <array>
#include <atomic>
#include <functional>
#include <string>

// gen_amalgamated expanded: #include "perfetto/base/logging.h"
// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
// gen_amalgamated expanded: #include "perfetto/base/time.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace_events.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_annotations.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"

// A facility to trace execution of the perfetto codebase itself.
// The meta-tracing framework is organized into three layers:
//
// 1. A static ring-buffer in base/ (this file) that supports concurrent writes
//    and a single reader.
//    The responsibility of this layer is to store events and counters as
//    efficiently as possible without re-entering any tracing code.
//    This is really a static-storage-based ring-buffer based on a POD array.
//    This layer does NOT deal with serializing the meta-trace buffer.
//    It posts a task when it's half full and expects something outside of
//    base/ to drain the ring-buffer and serialize it, eventually writing it
//    into the trace itself, before it gets 100% full.
//
// 2. A class in tracing/core which takes care of serializing the meta-trace
//    buffer into the trace using a TraceWriter. See metatrace_writer.h .
//
// 3. A data source in traced_probes that, when enabled via the trace config,
//    injects metatrace events into the trace. See metatrace_data_source.h .
//
// The available events and tags are defined in metatrace_events.h .

namespace perfetto {

namespace base {
class TaskRunner;
}  // namespace base

namespace metatrace {

// Meta-tracing is organized in "tags" that can be selectively enabled. This is
// to enable meta-tracing only of one sub-system. This word has one "enabled"
// bit for each tag. 0 -> meta-tracing off.
extern std::atomic<uint32_t> g_enabled_tags;

// Time of the Enable() call. Used as a reference for keeping delta timestamps
// in Record.
extern std::atomic<uint64_t> g_enabled_timestamp;

// Enables meta-tracing for one or more tags. Once enabled it will discard any
// further Enable() calls and return false until disabled.
// |read_task| is a closure that will be enqueued on |task_runner| when the
// meta-tracing ring buffer is half full. The task is expected to read the ring
// buffer using RingBuffer::GetReadIterator() and serialize the contents onto a
// file or into the trace itself.
// Must be called on the |task_runner| passed.
// |task_runner| must have static lifetime.
bool Enable(std::function<void()> read_task, base::TaskRunner*, uint32_t tags);

// Disables meta-tracing.
// Must be called on the same |task_runner| as Enable().
void Disable();

inline uint64_t TraceTimeNowNs() {
  return static_cast<uint64_t>(base::GetBootTimeNs().count());
}

// Returns a relaxed view of whether metatracing is enabled for the given tag.
// Useful for skipping unnecessary argument computation if metatracing is off.
inline bool IsEnabled(uint32_t tag) {
  auto enabled_tags = g_enabled_tags.load(std::memory_order_relaxed);
  if (PERFETTO_LIKELY((enabled_tags & tag) == 0))
    return false;
  else
    return true;
}

// Holds the data for a metatrace event or counter.
struct Record {
  static constexpr uint16_t kTypeMask = 0x8000;
  static constexpr uint16_t kTypeCounter = 0x8000;
  static constexpr uint16_t kTypeEvent = 0;

  uint64_t timestamp_ns() const {
    auto base_ns = g_enabled_timestamp.load(std::memory_order_relaxed);
    PERFETTO_DCHECK(base_ns);
    return base_ns + ((static_cast<uint64_t>(timestamp_ns_high) << 32) |
                      timestamp_ns_low);
  }

  void set_timestamp(uint64_t ts) {
    auto t_start = g_enabled_timestamp.load(std::memory_order_relaxed);
    uint64_t diff = ts - t_start;
    PERFETTO_DCHECK(diff < (1ull << 48));
    timestamp_ns_low = static_cast<uint32_t>(diff);
    timestamp_ns_high = static_cast<uint16_t>(diff >> 32);
  }

  // We can't just memset() this class because on MSVC std::atomic<> is not
  // trivially constructible anymore. Also std::atomic<> has a deleted copy
  // constructor so we can't just do "*this = Record()" either.
  // See http://bit.ly/339Jlzd .
  void clear() {
    this->~Record();
    new (this) Record();
  }

  // This field holds the type (counter vs event) in the MSB and event ID (as
  // defined in metatrace_events.h) in the lowest 15 bits. It is also used as
  // a linearization point: this is always written after all the other
  // fields with a release-store. This is so the reader can determine whether
  // it can safely process the other event fields after a load-acquire.
  std::atomic<uint16_t> type_and_id{};

  // Timestamp is stored as a 48-bit value diffed against g_enabled_timestamp.
  // This gives us 78 hours from Enable().
  uint16_t timestamp_ns_high = 0;
  uint32_t timestamp_ns_low = 0;

  uint32_t thread_id = 0;

  union {
    // Only one of the two elements can be zero initialized, clang complains
    // about "initializing multiple members of union" otherwise.
    uint32_t duration_ns = 0;  // If type == event.
    int32_t counter_value;     // If type == counter.
  };
};

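// Illustrative arithmetic (editorial addition, not part of the upstream
// Perfetto sources) for the 48-bit timestamp encoding above. set_timestamp()
// stores the delta against g_enabled_timestamp split into a 32-bit low part
// and a 16-bit high part, and timestamp_ns() reconstructs the absolute value:
//
//   delta_ns    = (uint64_t(timestamp_ns_high) << 32) | timestamp_ns_low;
//   absolute_ns = g_enabled_timestamp + delta_ns;
//
// 2^48 ns is roughly 78 hours, which bounds how long a single Enable() session
// can keep producing valid timestamps.
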
// Holds the meta-tracing data in a statically allocated array.
// This class uses static storage (as opposed to being a singleton) to:
// - Have the guarantee of always valid storage, so that meta-tracing can be
//   safely used in any part of the codebase, including base/ itself.
// - Avoid barriers that thread-safe static locals would require.
class RingBuffer {
 public:
  static constexpr size_t kCapacity = 4096;  // 4096 * 16 bytes = 64K.

  // This iterator is not idempotent and will bump the read index in the buffer
  // at the end of the reads. There can be only one reader at any time.
  // Usage: for (auto it = RingBuffer::GetReadIterator(); it; ++it) { it->... }
  class ReadIterator {
   public:
    ReadIterator(ReadIterator&& other) {
      PERFETTO_DCHECK(other.valid_);
      cur_ = other.cur_;
      end_ = other.end_;
      valid_ = other.valid_;
      other.valid_ = false;
    }

    ~ReadIterator() {
      if (!valid_)
        return;
      PERFETTO_DCHECK(cur_ >= RingBuffer::rd_index_);
      PERFETTO_DCHECK(cur_ <= RingBuffer::wr_index_);
      RingBuffer::rd_index_.store(cur_, std::memory_order_release);
    }

    explicit operator bool() const { return cur_ < end_; }
    const Record* operator->() const { return RingBuffer::At(cur_); }
    const Record& operator*() const { return *operator->(); }

    // This is for ++it. it++ is deliberately not supported.
    ReadIterator& operator++() {
      PERFETTO_DCHECK(cur_ < end_);
      // Once a record has been read, mark it as free by clearing its
      // type_and_id, so if we encounter it in another read iteration while
      // being written we know it's not fully written yet.
      // The memory_order_relaxed below is enough because:
      // - The reader is single-threaded and doesn't re-read the same records.
      // - Before starting a read batch, the reader has an acquire barrier on
      //   |rd_index_|.
      // - After terminating a read batch, the ~ReadIterator dtor updates the
      //   |rd_index_| with a release-store.
      // - Reader and writer are typically kCapacity/2 apart. So unless an
      //   overrun happens a writer won't reuse a newly released record any time
      //   soon. If an overrun happens, everything is busted regardless.
      At(cur_)->type_and_id.store(0, std::memory_order_relaxed);
      ++cur_;
      return *this;
    }

   private:
    friend class RingBuffer;
    ReadIterator(uint64_t begin, uint64_t end)
        : cur_(begin), end_(end), valid_(true) {}
    ReadIterator& operator=(const ReadIterator&) = delete;
    ReadIterator(const ReadIterator&) = delete;

    uint64_t cur_;
    uint64_t end_;
    bool valid_;
  };

  static Record* At(uint64_t index) {
    // Doesn't really have to be pow2, but if not the compiler will emit
    // arithmetic operations to compute the modulo instead of a bitwise AND.
    static_assert(!(kCapacity & (kCapacity - 1)), "kCapacity must be pow2");
    PERFETTO_DCHECK(index >= rd_index_);
    PERFETTO_DCHECK(index <= wr_index_);
    return &records_[index % kCapacity];
  }

  // Must be called on the same task runner passed to Enable()
  static ReadIterator GetReadIterator() {
    PERFETTO_DCHECK(RingBuffer::IsOnValidTaskRunner());
    return ReadIterator(rd_index_.load(std::memory_order_acquire),
                        wr_index_.load(std::memory_order_acquire));
  }

  static Record* AppendNewRecord();
  static void Reset();

  static bool has_overruns() {
    return has_overruns_.load(std::memory_order_acquire);
  }

  // Can temporarily return a value >= kCapacity but is eventually consistent.
  // This would happen in case of overruns until threads hit the --wr_index_
  // in AppendNewRecord().
  static uint64_t GetSizeForTesting() {
    auto wr_index = wr_index_.load(std::memory_order_relaxed);
    auto rd_index = rd_index_.load(std::memory_order_relaxed);
    PERFETTO_DCHECK(wr_index >= rd_index);
    return wr_index - rd_index;
  }

 private:
  friend class ReadIterator;

  // Returns true if the caller is on the task runner passed to Enable().
  // Used only for DCHECKs.
  static bool IsOnValidTaskRunner();

  static std::array<Record, kCapacity> records_;
  static std::atomic<bool> read_task_queued_;
  static std::atomic<uint64_t> wr_index_;
  static std::atomic<uint64_t> rd_index_;
  static std::atomic<bool> has_overruns_;
  static Record bankruptcy_record_;  // Used in case of overruns.
};

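// Illustrative sketch (editorial addition, not part of the upstream Perfetto
// sources) of the read task expected by Enable(). It drains the ring buffer
// with the single-reader iterator and tells events and counters apart via
// Record::kTypeMask; ConsumeEvent() and ConsumeCounter() are hypothetical:
//
//   for (auto it = RingBuffer::GetReadIterator(); it; ++it) {
//     uint16_t type_and_id = it->type_and_id.load(std::memory_order_acquire);
//     if (type_and_id == 0)
//       continue;  // Record reserved but not fully written yet.
//     uint16_t id = type_and_id & static_cast<uint16_t>(~Record::kTypeMask);
//     if ((type_and_id & Record::kTypeMask) == Record::kTypeCounter)
//       ConsumeCounter(id, it->counter_value);
//     else
//       ConsumeEvent(id, it->timestamp_ns(), it->duration_ns);
//   }
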
inline void TraceCounter(uint32_t tag, uint16_t id, int32_t value) {
  // memory_order_relaxed is okay because the storage has static lifetime.
  // It is safe to accidentally log an event soon after disabling.
  auto enabled_tags = g_enabled_tags.load(std::memory_order_relaxed);
  if (PERFETTO_LIKELY((enabled_tags & tag) == 0))
    return;
  Record* record = RingBuffer::AppendNewRecord();
  record->thread_id = static_cast<uint32_t>(base::GetThreadId());
  record->set_timestamp(TraceTimeNowNs());
  record->counter_value = value;
  record->type_and_id.store(Record::kTypeCounter | id,
                            std::memory_order_release);
}

class ScopedEvent {
 public:
  ScopedEvent(uint32_t tag, uint16_t event_id) {
    auto enabled_tags = g_enabled_tags.load(std::memory_order_relaxed);
    if (PERFETTO_LIKELY((enabled_tags & tag) == 0))
      return;
    event_id_ = event_id;
    record_ = RingBuffer::AppendNewRecord();
    record_->thread_id = static_cast<uint32_t>(base::GetThreadId());
    record_->set_timestamp(TraceTimeNowNs());
  }

  ~ScopedEvent() {
    if (PERFETTO_LIKELY(!record_))
      return;
    auto now = TraceTimeNowNs();
    record_->duration_ns = static_cast<uint32_t>(now - record_->timestamp_ns());
    record_->type_and_id.store(Record::kTypeEvent | event_id_,
                               std::memory_order_release);
  }

 private:
  Record* record_ = nullptr;
  uint16_t event_id_ = 0;
  ScopedEvent(const ScopedEvent&) = delete;
  ScopedEvent& operator=(const ScopedEvent&) = delete;
};

// Boilerplate to derive a unique variable name for the event.
#define PERFETTO_METATRACE_UID2(a, b) a##b
#define PERFETTO_METATRACE_UID(x) PERFETTO_METATRACE_UID2(metatrace_, x)

#define PERFETTO_METATRACE_SCOPED(TAG, ID)                                \
  ::perfetto::metatrace::ScopedEvent PERFETTO_METATRACE_UID(__COUNTER__)( \
      ::perfetto::metatrace::TAG, ::perfetto::metatrace::ID)

#define PERFETTO_METATRACE_COUNTER(TAG, ID, VALUE)                \
  ::perfetto::metatrace::TraceCounter(::perfetto::metatrace::TAG, \
                                      ::perfetto::metatrace::ID,  \
                                      static_cast<int32_t>(VALUE))

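// Illustrative usage sketch (editorial addition, not part of the upstream
// Perfetto sources) for the macros above. The scoped event records the
// duration of the enclosing block, the counter records an instantaneous
// value; DrainFtrace() and DoDrain() are hypothetical, the tag/event/counter
// names are real entries from metatrace_events.h:
//
//   void DrainFtrace() {
//     PERFETTO_METATRACE_SCOPED(TAG_FTRACE, FTRACE_CPU_READ_BATCH);
//     size_t pages_drained = DoDrain();
//     PERFETTO_METATRACE_COUNTER(TAG_FTRACE, FTRACE_PAGES_DRAINED,
//                                pages_drained);
//   }
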
}  // namespace metatrace
}  // namespace perfetto

#endif  // INCLUDE_PERFETTO_EXT_BASE_METATRACE_H_
// gen_amalgamated begin header: include/perfetto/base/task_runner.h
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef INCLUDE_PERFETTO_BASE_TASK_RUNNER_H_
#define INCLUDE_PERFETTO_BASE_TASK_RUNNER_H_

#include <stdint.h>

#include <functional>

// gen_amalgamated expanded: #include "perfetto/base/export.h"

namespace perfetto {
namespace base {

// A generic interface to allow the library clients to interleave the execution
// of the tracing internals in their runtime environment.
// The expectation is that all tasks, which are queued either via PostTask() or
// AddFileDescriptorWatch(), are executed on the same sequence (either on the
// same thread, or on a thread pool that gives sequencing guarantees).
//
// Tasks are never executed synchronously inside PostTask and there is a full
// memory barrier between tasks.
//
// All methods of this interface can be called from any thread.
class PERFETTO_EXPORT TaskRunner {
 public:
  virtual ~TaskRunner();

  // Schedule a task for immediate execution. Immediate tasks are always
  // executed in the order they are posted. Can be called from any thread.
  virtual void PostTask(std::function<void()>) = 0;

  // Schedule a task for execution after |delay_ms|. Note that there is no
  // strict ordering guarantee between immediate and delayed tasks. Can be
  // called from any thread.
  virtual void PostDelayedTask(std::function<void()>, uint32_t delay_ms) = 0;

  // Schedule a task to run when |fd| becomes readable. The same |fd| can only
  // be monitored by one function. Note that this function only needs to be
  // implemented on platforms where the built-in ipc framework is used. Can be
  // called from any thread.
  // TODO(skyostil): Refactor this out of the shared interface.
  virtual void AddFileDescriptorWatch(int fd, std::function<void()>) = 0;

  // Remove a previously scheduled watch for |fd|. If this is run on the target
  // thread of this TaskRunner, guarantees that the task registered to this fd
  // will not be executed after this function call. Can be called from any
  // thread.
  virtual void RemoveFileDescriptorWatch(int fd) = 0;

  // Checks if the current thread is the same thread where the TaskRunner's
  // tasks run. This allows single threaded task runners (like the ones used in
  // perfetto) to inform the caller that anything posted will run on the same
  // thread/sequence. This can allow some callers to skip PostTask and instead
  // directly execute the code. Can be called from any thread.
  virtual bool RunsTasksOnCurrentThread() const = 0;
};

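// Illustrative usage sketch (editorial addition, not part of the upstream
// Perfetto sources) for RunsTasksOnCurrentThread(). Callers can skip the
// PostTask() hop when already on the runner's sequence; |task_runner| and
// DoWork() are hypothetical:
//
//   if (task_runner->RunsTasksOnCurrentThread()) {
//     DoWork();
//   } else {
//     task_runner->PostTask([] { DoWork(); });
//   }
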
}  // namespace base
}  // namespace perfetto

#endif  // INCLUDE_PERFETTO_BASE_TASK_RUNNER_H_
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace.h"

// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
// gen_amalgamated expanded: #include "perfetto/base/time.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"

namespace perfetto {
namespace metatrace {

std::atomic<uint32_t> g_enabled_tags{0};
std::atomic<uint64_t> g_enabled_timestamp{0};

// static members
constexpr size_t RingBuffer::kCapacity;
std::array<Record, RingBuffer::kCapacity> RingBuffer::records_;
std::atomic<bool> RingBuffer::read_task_queued_;
std::atomic<uint64_t> RingBuffer::wr_index_;
std::atomic<uint64_t> RingBuffer::rd_index_;
std::atomic<bool> RingBuffer::has_overruns_;
Record RingBuffer::bankruptcy_record_;

constexpr uint16_t Record::kTypeMask;
constexpr uint16_t Record::kTypeCounter;
constexpr uint16_t Record::kTypeEvent;

namespace {

// std::function<> is not trivially de/constructible. This struct wraps it in a
// heap-allocated struct to avoid static initializers.
struct Delegate {
  static Delegate* GetInstance() {
    static Delegate* instance = new Delegate();
    return instance;
  }

  base::TaskRunner* task_runner = nullptr;
  std::function<void()> read_task;
};

}  // namespace

bool Enable(std::function<void()> read_task,
            base::TaskRunner* task_runner,
            uint32_t tags) {
  PERFETTO_DCHECK(read_task);
  PERFETTO_DCHECK(task_runner->RunsTasksOnCurrentThread());
  if (g_enabled_tags.load(std::memory_order_acquire))
    return false;

  Delegate* dg = Delegate::GetInstance();
  dg->task_runner = task_runner;
  dg->read_task = std::move(read_task);
  RingBuffer::Reset();
  g_enabled_timestamp.store(TraceTimeNowNs(), std::memory_order_relaxed);
  g_enabled_tags.store(tags, std::memory_order_release);
  return true;
}

void Disable() {
  g_enabled_tags.store(0, std::memory_order_release);
  Delegate* dg = Delegate::GetInstance();
  PERFETTO_DCHECK(!dg->task_runner ||
                  dg->task_runner->RunsTasksOnCurrentThread());
  dg->task_runner = nullptr;
  dg->read_task = nullptr;
}

// static
void RingBuffer::Reset() {
  bankruptcy_record_.clear();
  for (Record& record : records_)
    record.clear();
  wr_index_ = 0;
  rd_index_ = 0;
  has_overruns_ = false;
  read_task_queued_ = false;
}

// static
Record* RingBuffer::AppendNewRecord() {
  auto wr_index = wr_index_.fetch_add(1, std::memory_order_acq_rel);

  // rd_index can only monotonically increase, we don't care if we read an
  // older value, we'll just hit the slow-path a bit earlier if it happens.
  auto rd_index = rd_index_.load(std::memory_order_relaxed);

  PERFETTO_DCHECK(wr_index >= rd_index);
  auto size = wr_index - rd_index;
  if (PERFETTO_LIKELY(size < kCapacity / 2))
    return At(wr_index);

  // Slow-path: Enqueue the read task and handle overruns.
  bool expected = false;
  if (RingBuffer::read_task_queued_.compare_exchange_strong(expected, true)) {
    Delegate* dg = Delegate::GetInstance();
    if (dg->task_runner) {
      dg->task_runner->PostTask([] {
        // Meta-tracing might have been disabled in the meantime.
        auto read_task = Delegate::GetInstance()->read_task;
        if (read_task)
          read_task();
        RingBuffer::read_task_queued_ = false;
      });
    }
  }

  if (PERFETTO_LIKELY(size < kCapacity))
    return At(wr_index);

  has_overruns_.store(true, std::memory_order_release);
  wr_index_.fetch_sub(1, std::memory_order_acq_rel);

  // In the case of overflows, threads will race writing on the same memory
  // location and TSan will rightly complain. This is fine though because
  // nobody will read the bankruptcy record and it's designed to contain
  // garbage.
  PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(&bankruptcy_record_, sizeof(Record),
                                      "nothing reads bankruptcy_record_")
  return &bankruptcy_record_;
}

// static
bool RingBuffer::IsOnValidTaskRunner() {
  auto* task_runner = Delegate::GetInstance()->task_runner;
  return task_runner && task_runner->RunsTasksOnCurrentThread();
}

}  // namespace metatrace
}  // namespace perfetto
// gen_amalgamated begin source: src/base/paged_memory.cc
// gen_amalgamated begin header: include/perfetto/ext/base/paged_memory.h
// gen_amalgamated begin header: include/perfetto/ext/base/container_annotations.h
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef INCLUDE_PERFETTO_EXT_BASE_CONTAINER_ANNOTATIONS_H_
#define INCLUDE_PERFETTO_EXT_BASE_CONTAINER_ANNOTATIONS_H_

// gen_amalgamated expanded: #include "perfetto/base/build_config.h"

// Windows ASAN doesn't currently support these annotations.
#if defined(ADDRESS_SANITIZER) && !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
    !defined(ADDRESS_SANITIZER_WITHOUT_INSTRUMENTATION)

#define ANNOTATE_NEW_BUFFER(buffer, capacity, new_size)                      \
  if (buffer) {                                                              \
    __sanitizer_annotate_contiguous_container(buffer, (buffer) + (capacity), \
                                              (buffer) + (capacity),         \
                                              (buffer) + (new_size));        \
  }
#define ANNOTATE_DELETE_BUFFER(buffer, capacity, old_size)                   \
  if (buffer) {                                                              \
    __sanitizer_annotate_contiguous_container(buffer, (buffer) + (capacity), \
                                              (buffer) + (old_size),         \
                                              (buffer) + (capacity));        \
  }
#define ANNOTATE_CHANGE_SIZE(buffer, capacity, old_size, new_size)           \
  if (buffer) {                                                              \
    __sanitizer_annotate_contiguous_container(buffer, (buffer) + (capacity), \
                                              (buffer) + (old_size),         \
                                              (buffer) + (new_size));        \
  }
#define ANNOTATE_CHANGE_CAPACITY(buffer, old_capacity, buffer_size, \
                                 new_capacity)                      \
  ANNOTATE_DELETE_BUFFER(buffer, old_capacity, buffer_size);        \
  ANNOTATE_NEW_BUFFER(buffer, new_capacity, buffer_size);
// Annotations require buffers to begin on an 8-byte boundary.
#else  // defined(ADDRESS_SANITIZER)
#define ANNOTATE_NEW_BUFFER(buffer, capacity, new_size)
#define ANNOTATE_DELETE_BUFFER(buffer, capacity, old_size)
#define ANNOTATE_CHANGE_SIZE(buffer, capacity, old_size, new_size)
#define ANNOTATE_CHANGE_CAPACITY(buffer, old_capacity, buffer_size, \
                                 new_capacity)
#endif  // defined(ADDRESS_SANITIZER)

#endif  // INCLUDE_PERFETTO_EXT_BASE_CONTAINER_ANNOTATIONS_H_
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef INCLUDE_PERFETTO_EXT_BASE_PAGED_MEMORY_H_
#define INCLUDE_PERFETTO_EXT_BASE_PAGED_MEMORY_H_

#include <memory>

// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/container_annotations.h"

// We need to track the committed size on windows and when ASAN is enabled.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || defined(ADDRESS_SANITIZER)
#define TRACK_COMMITTED_SIZE() 1
#else
#define TRACK_COMMITTED_SIZE() 0
#endif

namespace perfetto {
namespace base {

class PagedMemory {
 public:
  // Initializes an invalid PagedMemory pointing to nullptr.
  PagedMemory();

  ~PagedMemory();

  PagedMemory(PagedMemory&& other) noexcept;
  PagedMemory& operator=(PagedMemory&& other);

  enum AllocationFlags {
    // By default, Allocate() crashes if the underlying mmap fails (e.g., if out
    // of virtual address space). When this flag is provided, an invalid
    // PagedMemory pointing to nullptr is returned in this case instead.
    kMayFail = 1 << 0,

    // By default, Allocate() commits the allocated memory immediately. When
    // this flag is provided, the memory virtual address space may only be
    // reserved and the user should call EnsureCommitted() before writing to
    // memory addresses.
    kDontCommit = 1 << 1,
  };

  // Allocates |size| bytes using mmap(MAP_ANONYMOUS). The returned memory is
  // guaranteed to be page-aligned and guaranteed to be zeroed.
  // For |flags|, see the AllocationFlags enum above.
  static PagedMemory Allocate(size_t size, int flags = 0);

  // Hint to the OS that the memory range is not needed and can be discarded.
  // The memory remains accessible and its contents may be retained, or they
  // may be zeroed. This function may be a NOP on some platforms. Returns true
  // if implemented.
  bool AdviseDontNeed(void* p, size_t size);

  // Ensures that at least the first |committed_size| bytes of the allocated
  // memory region are committed. The implementation may commit memory in
  // larger chunks above |committed_size|. Crashes if the memory couldn't be
  // committed.
#if TRACK_COMMITTED_SIZE()
  void EnsureCommitted(size_t committed_size);
#else   // TRACK_COMMITTED_SIZE()
  void EnsureCommitted(size_t /*committed_size*/) {}
#endif  // TRACK_COMMITTED_SIZE()

  inline void* Get() const noexcept { return p_; }
  inline bool IsValid() const noexcept { return !!p_; }
  inline size_t size() const noexcept { return size_; }

 private:
  PagedMemory(char* p, size_t size);

  PagedMemory(const PagedMemory&) = delete;
  // Defaulted for implementation of move constructor + assignment.
  PagedMemory& operator=(const PagedMemory&) = default;

  char* p_ = nullptr;

  // The size originally passed to Allocate(). The actual virtual memory
  // reservation will be larger due to: (i) guard pages; (ii) rounding up to
  // the system page size.
  size_t size_ = 0;

#if TRACK_COMMITTED_SIZE()
  size_t committed_size_ = 0u;
#endif  // TRACK_COMMITTED_SIZE()
};

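// Illustrative usage sketch (editorial addition, not part of the upstream
// Perfetto sources) for PagedMemory. With kMayFail the allocation returns an
// invalid object instead of crashing; with kDontCommit pages are committed on
// demand via EnsureCommitted():
//
//   auto mem = PagedMemory::Allocate(
//       64 * 1024 * 1024, PagedMemory::kMayFail | PagedMemory::kDontCommit);
//   if (!mem.IsValid())
//     return;  // Out of virtual address space.
//   mem.EnsureCommitted(4096);   // Make at least the first page writable.
//   memset(mem.Get(), 0, 4096);
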
}  // namespace base
}  // namespace perfetto

#endif  // INCLUDE_PERFETTO_EXT_BASE_PAGED_MEMORY_H_
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"

#include <algorithm>
#include <cmath>

#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
#include <Windows.h>
#else  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
#include <sys/mman.h>
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)

// gen_amalgamated expanded: #include "perfetto/base/logging.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/container_annotations.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"

namespace perfetto {
namespace base {

namespace {

#if TRACK_COMMITTED_SIZE()
constexpr size_t kCommitChunkSize = 4 * 1024 * 1024;  // 4MB
#endif

size_t RoundUpToSysPageSize(size_t req_size) {
  const size_t page_size = GetSysPageSize();
  return (req_size + page_size - 1) & ~(page_size - 1);
}

size_t GuardSize() {
  return GetSysPageSize();
}

}  // namespace

// static
PagedMemory PagedMemory::Allocate(size_t req_size, int flags) {
  size_t rounded_up_size = RoundUpToSysPageSize(req_size);
  PERFETTO_CHECK(rounded_up_size >= req_size);
  size_t outer_size = rounded_up_size + GuardSize() * 2;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  void* ptr = VirtualAlloc(nullptr, outer_size, MEM_RESERVE, PAGE_NOACCESS);
  if (!ptr && (flags & kMayFail))
    return PagedMemory();
  PERFETTO_CHECK(ptr);
  char* usable_region = reinterpret_cast<char*>(ptr) + GuardSize();
#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  void* ptr = mmap(nullptr, outer_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (ptr == MAP_FAILED && (flags & kMayFail))
    return PagedMemory();
  PERFETTO_CHECK(ptr && ptr != MAP_FAILED);
  char* usable_region = reinterpret_cast<char*>(ptr) + GuardSize();
  int res = mprotect(ptr, GuardSize(), PROT_NONE);
  res |= mprotect(usable_region + rounded_up_size, GuardSize(), PROT_NONE);
  PERFETTO_CHECK(res == 0);
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)

  auto memory = PagedMemory(usable_region, req_size);
#if TRACK_COMMITTED_SIZE()
  size_t initial_commit = req_size;
  if (flags & kDontCommit)
    initial_commit = std::min(initial_commit, kCommitChunkSize);
  memory.EnsureCommitted(initial_commit);
#endif  // TRACK_COMMITTED_SIZE()
  return memory;
}

PagedMemory::PagedMemory() {}

// clang-format off
PagedMemory::PagedMemory(char* p, size_t size) : p_(p), size_(size) {
  ANNOTATE_NEW_BUFFER(p_, size_, committed_size_)
}

PagedMemory::PagedMemory(PagedMemory&& other) noexcept {
  *this = other;
  other.p_ = nullptr;
}
// clang-format on

PagedMemory& PagedMemory::operator=(PagedMemory&& other) {
  this->~PagedMemory();
  new (this) PagedMemory(std::move(other));
  return *this;
}

PagedMemory::~PagedMemory() {
  if (!p_)
    return;
  PERFETTO_CHECK(size_);
  char* start = p_ - GuardSize();
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  BOOL res = VirtualFree(start, 0, MEM_RELEASE);
  PERFETTO_CHECK(res != 0);
#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  const size_t outer_size = RoundUpToSysPageSize(size_) + GuardSize() * 2;
  int res = munmap(start, outer_size);
  PERFETTO_CHECK(res == 0);
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  ANNOTATE_DELETE_BUFFER(p_, size_, committed_size_)
}

bool PagedMemory::AdviseDontNeed(void* p, size_t size) {
  PERFETTO_DCHECK(p_);
  PERFETTO_DCHECK(p >= p_);
  PERFETTO_DCHECK(static_cast<char*>(p) + size <= p_ + size_);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
  // Discarding pages on Windows has more CPU cost than is justified for the
  // possible memory savings.
  return false;
#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) ||
        // PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
  // http://man7.org/linux/man-pages/man2/madvise.2.html
  int res = madvise(p, size, MADV_DONTNEED);
  PERFETTO_DCHECK(res == 0);
  return true;
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) ||
        // PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
}

#if TRACK_COMMITTED_SIZE()
void PagedMemory::EnsureCommitted(size_t committed_size) {
  PERFETTO_DCHECK(committed_size > 0u);
  PERFETTO_DCHECK(committed_size <= size_);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  if (committed_size_ >= committed_size)
    return;
  // Rounding up.
  size_t delta = committed_size - committed_size_;
  size_t num_additional_chunks =
      (delta + kCommitChunkSize - 1) / kCommitChunkSize;
  PERFETTO_DCHECK(num_additional_chunks * kCommitChunkSize >= delta);
  // Don't commit more than the total size.
  size_t commit_size = std::min(num_additional_chunks * kCommitChunkSize,
                                size_ - committed_size_);
  void* res = VirtualAlloc(p_ + committed_size_, commit_size, MEM_COMMIT,
                           PAGE_READWRITE);
  PERFETTO_CHECK(res);
  ANNOTATE_CHANGE_SIZE(p_, size_, committed_size_,
                       committed_size_ + commit_size)
  committed_size_ += commit_size;
#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // mmap commits automatically as needed, so we only track here for ASAN.
  committed_size = std::max(committed_size_, committed_size);
  ANNOTATE_CHANGE_SIZE(p_, size_, committed_size_, committed_size)
  committed_size_ = committed_size;
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
}
#endif  // TRACK_COMMITTED_SIZE()

}  // namespace base
}  // namespace perfetto
// gen_amalgamated begin source: src/base/string_splitter.cc
// gen_amalgamated begin header: include/perfetto/ext/base/string_splitter.h
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_STRING_SPLITTER_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_STRING_SPLITTER_H_
|
|
|
|
#include <string>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// C++ version of strtok(). Splits a string without making copies or any heap
|
|
// allocations. Destructs the original string passed in input.
|
|
// Supports the special case of using \0 as a delimiter.
|
|
// The token returned in output are valid as long as the input string is valid.
|
|
class StringSplitter {
|
|
public:
|
|
// Can take ownership of the string if passed via std::move(), e.g.:
|
|
// StringSplitter(std::move(str), '\n');
|
|
StringSplitter(std::string, char delimiter);
|
|
|
|
// Splits a C-string. The input string will be forcefully null-terminated (so
|
|
// str[size - 1] should be == '\0' or the last char will be truncated).
|
|
StringSplitter(char* str, size_t size, char delimiter);
|
|
|
|
// Splits the current token from an outer StringSplitter instance. This is to
|
|
// chain splitters as follows:
|
|
// for (base::StringSplitter lines(x, '\n'); ss.Next();)
|
|
// for (base::StringSplitter words(&lines, ' '); words.Next();)
|
|
StringSplitter(StringSplitter*, char delimiter);
|
|
|
|
// Returns true if a token is found (in which case it will be stored in
|
|
// cur_token()), false if no more tokens are found.
|
|
bool Next();
|
|
|
|
// Returns the current token iff last call to Next() returned true. In this
|
|
// case it guarantees that the returned string is always null terminated.
|
|
// In all other cases (before the 1st call to Next() and after Next() returns
|
|
// false) returns nullptr.
|
|
char* cur_token() { return cur_; }
|
|
|
|
// Returns the length of the current token (excluding the null terminator).
|
|
size_t cur_token_size() const { return cur_size_; }
|
|
|
|
private:
|
|
StringSplitter(const StringSplitter&) = delete;
|
|
StringSplitter& operator=(const StringSplitter&) = delete;
|
|
void Initialize(char* str, size_t size);
|
|
|
|
std::string str_;
|
|
char* cur_;
|
|
size_t cur_size_;
|
|
char* next_;
|
|
char* end_; // STL-style, points one past the last char.
|
|
const char delimiter_;
|
|
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_STRING_SPLITTER_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_splitter.h"
|
|
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
StringSplitter::StringSplitter(std::string str, char delimiter)
|
|
: str_(std::move(str)), delimiter_(delimiter) {
|
|
// It's legal to access str[str.size()] in C++11 (it always returns \0),
|
|
// hence the +1 (which becomes just size() after the -1 in Initialize()).
|
|
Initialize(&str_[0], str_.size() + 1);
|
|
}
|
|
|
|
StringSplitter::StringSplitter(char* str, size_t size, char delimiter)
|
|
: delimiter_(delimiter) {
|
|
Initialize(str, size);
|
|
}
|
|
|
|
StringSplitter::StringSplitter(StringSplitter* outer, char delimiter)
|
|
: delimiter_(delimiter) {
|
|
Initialize(outer->cur_token(), outer->cur_token_size() + 1);
|
|
}
|
|
|
|
void StringSplitter::Initialize(char* str, size_t size) {
|
|
PERFETTO_DCHECK(!size || str);
|
|
next_ = str;
|
|
end_ = str + size;
|
|
cur_ = nullptr;
|
|
cur_size_ = 0;
|
|
if (size)
|
|
next_[size - 1] = '\0';
|
|
}
|
|
|
|
bool StringSplitter::Next() {
|
|
for (; next_ < end_; next_++) {
|
|
if (*next_ == delimiter_)
|
|
continue;
|
|
cur_ = next_;
|
|
for (;; next_++) {
|
|
if (*next_ == delimiter_) {
|
|
cur_size_ = static_cast<size_t>(next_ - cur_);
|
|
*(next_++) = '\0';
|
|
break;
|
|
}
|
|
if (*next_ == '\0') {
|
|
cur_size_ = static_cast<size_t>(next_ - cur_);
|
|
next_ = end_;
|
|
break;
|
|
}
|
|
}
|
|
if (*cur_)
|
|
return true;
|
|
break;
|
|
}
|
|
cur_ = nullptr;
|
|
cur_size_ = 0;
|
|
return false;
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/string_utils.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/string_utils.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/optional.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_OPTIONAL_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_OPTIONAL_H_
|
|
|
|
#include <functional>
|
|
#include <type_traits>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Specification:
|
|
// http://en.cppreference.com/w/cpp/utility/optional/in_place_t
|
|
struct in_place_t {};
|
|
|
|
// Specification:
|
|
// http://en.cppreference.com/w/cpp/utility/optional/nullopt_t
|
|
struct nullopt_t {
|
|
constexpr explicit nullopt_t(int) {}
|
|
};
|
|
|
|
// Specification:
|
|
// http://en.cppreference.com/w/cpp/utility/optional/in_place
|
|
constexpr in_place_t in_place = {};
|
|
|
|
// Specification:
|
|
// http://en.cppreference.com/w/cpp/utility/optional/nullopt
|
|
constexpr nullopt_t nullopt(0);
|
|
|
|
// Forward declaration, which is referred by following helpers.
|
|
template <typename T>
|
|
class Optional;
|
|
|
|
namespace internal {
|
|
|
|
template <typename T, bool = std::is_trivially_destructible<T>::value>
|
|
struct OptionalStorageBase {
|
|
// Initializing |empty_| here instead of using default member initializing
|
|
// to avoid errors in g++ 4.8.
|
|
constexpr OptionalStorageBase() : empty_('\0') {}
|
|
|
|
template <class... Args>
|
|
constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
|
|
: is_populated_(true), value_(std::forward<Args>(args)...) {}
|
|
|
|
// When T is not trivially destructible we must call its
|
|
// destructor before deallocating its memory.
|
|
// Note that this hides the (implicitly declared) move constructor, which
|
|
// would be used for constexpr move constructor in OptionalStorage<T>.
|
|
// It is needed iff T is trivially move constructible. However, the current
|
|
// is_trivially_{copy,move}_constructible implementation requires
|
|
// is_trivially_destructible (which looks a bug, cf:
|
|
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51452 and
|
|
// http://cplusplus.github.io/LWG/lwg-active.html#2116), so it is not
|
|
// necessary for this case at the moment. Please see also the destructor
|
|
// comment in "is_trivially_destructible = true" specialization below.
|
|
~OptionalStorageBase() {
|
|
if (is_populated_)
|
|
value_.~T();
|
|
}
|
|
|
|
template <class... Args>
|
|
void Init(Args&&... args) {
|
|
PERFETTO_DCHECK(!is_populated_);
|
|
::new (&value_) T(std::forward<Args>(args)...);
|
|
is_populated_ = true;
|
|
}
|
|
|
|
bool is_populated_ = false;
|
|
union {
|
|
// |empty_| exists so that the union will always be initialized, even when
|
|
// it doesn't contain a value. Union members must be initialized for the
|
|
// constructor to be 'constexpr'.
|
|
char empty_;
|
|
T value_;
|
|
};
|
|
};
|
|
|
|
template <typename T>
|
|
struct OptionalStorageBase<T, true /* trivially destructible */> {
|
|
// Initializing |empty_| here instead of using default member initializing
|
|
// to avoid errors in g++ 4.8.
|
|
constexpr OptionalStorageBase() : empty_('\0') {}
|
|
|
|
template <class... Args>
|
|
constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
|
|
: is_populated_(true), value_(std::forward<Args>(args)...) {}
|
|
|
|
// When T is trivially destructible (i.e. its destructor does nothing) there
|
|
// is no need to call it. Implicitly defined destructor is trivial, because
|
|
// both members (bool and union containing only variants which are trivially
|
|
// destructible) are trivially destructible.
|
|
// Explicitly-defaulted destructor is also trivial, but do not use it here,
|
|
// because it hides the implicit move constructor. It is needed to implement
|
|
// constexpr move constructor in OptionalStorage iff T is trivially move
|
|
// constructible. Note that, if T is trivially move constructible, the move
|
|
// constructor of OptionalStorageBase<T> is also implicitly defined and it is
|
|
// trivially move constructor. If T is not trivially move constructible,
|
|
// "not declaring move constructor without destructor declaration" here means
|
|
// "delete move constructor", which works because any move constructor of
|
|
// OptionalStorage will not refer to it in that case.
|
|
|
|
template <class... Args>
|
|
void Init(Args&&... args) {
|
|
PERFETTO_DCHECK(!is_populated_);
|
|
::new (&value_) T(std::forward<Args>(args)...);
|
|
is_populated_ = true;
|
|
}
|
|
|
|
bool is_populated_ = false;
|
|
union {
|
|
// |empty_| exists so that the union will always be initialized, even when
|
|
// it doesn't contain a value. Union members must be initialized for the
|
|
// constructor to be 'constexpr'.
|
|
char empty_;
|
|
T value_;
|
|
};
|
|
};
|
|
|
|
// Implement conditional constexpr copy and move constructors. These are
|
|
// constexpr if is_trivially_{copy,move}_constructible<T>::value is true
|
|
// respectively. If each is true, the corresponding constructor is defined as
|
|
// "= default;", which generates a constexpr constructor (In this case,
|
|
// the condition of constexpr-ness is satisfied because the base class also has
|
|
// compiler generated constexpr {copy,move} constructors). Note that
|
|
// placement-new is prohibited in constexpr.
|
|
template <typename T, bool = std::is_trivially_copy_constructible<T>::value>
|
|
struct OptionalStorage : OptionalStorageBase<T> {
|
|
// This is no trivially {copy,move} constructible case. Other cases are
|
|
// defined below as specializations.
|
|
|
|
// Accessing the members of template base class requires explicit
|
|
// declaration.
|
|
using OptionalStorageBase<T>::is_populated_;
|
|
using OptionalStorageBase<T>::value_;
|
|
using OptionalStorageBase<T>::Init;
|
|
|
|
// Inherit constructors (specifically, the in_place constructor).
|
|
using OptionalStorageBase<T>::OptionalStorageBase;
|
|
|
|
// User defined constructor deletes the default constructor.
|
|
// Define it explicitly.
|
|
OptionalStorage() = default;
|
|
|
|
OptionalStorage(const OptionalStorage& other) : OptionalStorageBase<T>() {
|
|
if (other.is_populated_)
|
|
Init(other.value_);
|
|
}
|
|
|
|
OptionalStorage(OptionalStorage&& other) noexcept(
|
|
std::is_nothrow_move_constructible<T>::value) {
|
|
if (other.is_populated_)
|
|
Init(std::move(other.value_));
|
|
}
|
|
};
|
|
|
|
template <typename T>
|
|
struct OptionalStorage<T, true /* trivially copy constructible */>
|
|
: OptionalStorageBase<T> {
|
|
using OptionalStorageBase<T>::is_populated_;
|
|
using OptionalStorageBase<T>::value_;
|
|
using OptionalStorageBase<T>::Init;
|
|
using OptionalStorageBase<T>::OptionalStorageBase;
|
|
|
|
OptionalStorage() = default;
|
|
OptionalStorage(const OptionalStorage& other) = default;
|
|
|
|
OptionalStorage(OptionalStorage&& other) noexcept(
|
|
std::is_nothrow_move_constructible<T>::value) {
|
|
if (other.is_populated_)
|
|
Init(std::move(other.value_));
|
|
}
|
|
};
|
|
|
|
// Base class to support conditionally usable copy-/move- constructors
|
|
// and assign operators.
|
|
template <typename T>
|
|
class OptionalBase {
|
|
// This class provides implementation rather than public API, so everything
|
|
// should be hidden. Often we use composition, but we cannot in this case
|
|
// because of C++ language restriction.
|
|
protected:
|
|
constexpr OptionalBase() = default;
|
|
constexpr OptionalBase(const OptionalBase& other) = default;
|
|
constexpr OptionalBase(OptionalBase&& other) = default;
|
|
|
|
template <class... Args>
|
|
constexpr explicit OptionalBase(in_place_t, Args&&... args)
|
|
: storage_(in_place, std::forward<Args>(args)...) {}
|
|
|
|
// Implementation of converting constructors.
|
|
template <typename U>
|
|
explicit OptionalBase(const OptionalBase<U>& other) {
|
|
if (other.storage_.is_populated_)
|
|
storage_.Init(other.storage_.value_);
|
|
}
|
|
|
|
template <typename U>
|
|
explicit OptionalBase(OptionalBase<U>&& other) {
|
|
if (other.storage_.is_populated_)
|
|
storage_.Init(std::move(other.storage_.value_));
|
|
}
|
|
|
|
~OptionalBase() = default;
|
|
|
|
OptionalBase& operator=(const OptionalBase& other) {
|
|
CopyAssign(other);
|
|
return *this;
|
|
}
|
|
|
|
OptionalBase& operator=(OptionalBase&& other) noexcept(
|
|
std::is_nothrow_move_assignable<T>::value&&
|
|
std::is_nothrow_move_constructible<T>::value) {
|
|
MoveAssign(std::move(other));
|
|
return *this;
|
|
}
|
|
|
|
template <typename U>
|
|
void CopyAssign(const OptionalBase<U>& other) {
|
|
if (other.storage_.is_populated_)
|
|
InitOrAssign(other.storage_.value_);
|
|
else
|
|
FreeIfNeeded();
|
|
}
|
|
|
|
template <typename U>
|
|
void MoveAssign(OptionalBase<U>&& other) {
|
|
if (other.storage_.is_populated_)
|
|
InitOrAssign(std::move(other.storage_.value_));
|
|
else
|
|
FreeIfNeeded();
|
|
}
|
|
|
|
template <typename U>
|
|
void InitOrAssign(U&& value) {
|
|
if (storage_.is_populated_)
|
|
storage_.value_ = std::forward<U>(value);
|
|
else
|
|
storage_.Init(std::forward<U>(value));
|
|
}
|
|
|
|
void FreeIfNeeded() {
|
|
if (!storage_.is_populated_)
|
|
return;
|
|
storage_.value_.~T();
|
|
storage_.is_populated_ = false;
|
|
}
|
|
|
|
// For implementing conversion, allow access to other typed OptionalBase
|
|
// class.
|
|
template <typename U>
|
|
friend class OptionalBase;
|
|
|
|
OptionalStorage<T> storage_;
|
|
};
|
|
|
|
// The following {Copy,Move}{Constructible,Assignable} structs are helpers to
|
|
// implement constructor/assign-operator overloading. Specifically, if T is
|
|
// is not movable but copyable, Optional<T>'s move constructor should not
|
|
// participate in overload resolution. This inheritance trick implements that.
|
|
template <bool is_copy_constructible>
|
|
struct CopyConstructible {};
|
|
|
|
template <>
|
|
struct CopyConstructible<false> {
|
|
constexpr CopyConstructible() = default;
|
|
constexpr CopyConstructible(const CopyConstructible&) = delete;
|
|
constexpr CopyConstructible(CopyConstructible&&) = default;
|
|
CopyConstructible& operator=(const CopyConstructible&) = default;
|
|
CopyConstructible& operator=(CopyConstructible&&) = default;
|
|
};
|
|
|
|
template <bool is_move_constructible>
|
|
struct MoveConstructible {};
|
|
|
|
template <>
|
|
struct MoveConstructible<false> {
|
|
constexpr MoveConstructible() = default;
|
|
constexpr MoveConstructible(const MoveConstructible&) = default;
|
|
constexpr MoveConstructible(MoveConstructible&&) = delete;
|
|
MoveConstructible& operator=(const MoveConstructible&) = default;
|
|
MoveConstructible& operator=(MoveConstructible&&) = default;
|
|
};
|
|
|
|
template <bool is_copy_assignable>
|
|
struct CopyAssignable {};
|
|
|
|
template <>
|
|
struct CopyAssignable<false> {
|
|
constexpr CopyAssignable() = default;
|
|
constexpr CopyAssignable(const CopyAssignable&) = default;
|
|
constexpr CopyAssignable(CopyAssignable&&) = default;
|
|
CopyAssignable& operator=(const CopyAssignable&) = delete;
|
|
CopyAssignable& operator=(CopyAssignable&&) = default;
|
|
};
|
|
|
|
template <bool is_move_assignable>
|
|
struct MoveAssignable {};
|
|
|
|
template <>
|
|
struct MoveAssignable<false> {
|
|
constexpr MoveAssignable() = default;
|
|
constexpr MoveAssignable(const MoveAssignable&) = default;
|
|
constexpr MoveAssignable(MoveAssignable&&) = default;
|
|
MoveAssignable& operator=(const MoveAssignable&) = default;
|
|
MoveAssignable& operator=(MoveAssignable&&) = delete;
|
|
};
|
|
|
|
// Helper to conditionally enable converting constructors and assign operators.
|
|
template <typename T, typename U>
|
|
struct IsConvertibleFromOptional
|
|
: std::integral_constant<
|
|
bool,
|
|
std::is_constructible<T, Optional<U>&>::value ||
|
|
std::is_constructible<T, const Optional<U>&>::value ||
|
|
std::is_constructible<T, Optional<U>&&>::value ||
|
|
std::is_constructible<T, const Optional<U>&&>::value ||
|
|
std::is_convertible<Optional<U>&, T>::value ||
|
|
std::is_convertible<const Optional<U>&, T>::value ||
|
|
std::is_convertible<Optional<U>&&, T>::value ||
|
|
std::is_convertible<const Optional<U>&&, T>::value> {};
|
|
|
|
template <typename T, typename U>
|
|
struct IsAssignableFromOptional
|
|
: std::integral_constant<
|
|
bool,
|
|
IsConvertibleFromOptional<T, U>::value ||
|
|
std::is_assignable<T&, Optional<U>&>::value ||
|
|
std::is_assignable<T&, const Optional<U>&>::value ||
|
|
std::is_assignable<T&, Optional<U>&&>::value ||
|
|
std::is_assignable<T&, const Optional<U>&&>::value> {};
|
|
|
|
// Forward compatibility for C++17.
|
|
// Introduce one more deeper nested namespace to avoid leaking using std::swap.
|
|
namespace swappable_impl {
|
|
using std::swap;
|
|
|
|
struct IsSwappableImpl {
|
|
// Tests if swap can be called. Check<T&>(0) returns true_type iff swap is
|
|
// available for T. Otherwise, Check's overload resolution falls back to
|
|
// Check(...) declared below thanks to SFINAE, so returns false_type.
|
|
template <typename T>
|
|
static auto Check(int)
|
|
-> decltype(swap(std::declval<T>(), std::declval<T>()), std::true_type());
|
|
|
|
template <typename T>
|
|
static std::false_type Check(...);
|
|
};
|
|
} // namespace swappable_impl
|
|
|
|
template <typename T>
|
|
struct IsSwappable : decltype(swappable_impl::IsSwappableImpl::Check<T&>(0)) {};
|
|
|
|
// Forward compatibility for C++20.
|
|
template <typename T>
|
|
using RemoveCvRefT =
|
|
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
|
|
|
|
} // namespace internal
|
|
|
|
// On Windows, by default, empty-base class optimization does not work,
|
|
// which means even if the base class is empty struct, it still consumes one
|
|
// byte for its body. __declspec(empty_bases) enables the optimization.
|
|
// cf)
|
|
// https://blogs.msdn.microsoft.com/vcblog/2016/03/30/optimizing-the-layout-of-empty-base-classes-in-vs2015-update-2-3/
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
|
|
!PERFETTO_BUILDFLAG(PERFETTO_COMPILER_GCC)
|
|
#define OPTIONAL_DECLSPEC_EMPTY_BASES __declspec(empty_bases)
|
|
#else
|
|
#define OPTIONAL_DECLSPEC_EMPTY_BASES
|
|
#endif
|
|
|
|
// base::Optional is a Chromium version of the C++17 optional class:
|
|
// std::optional documentation:
|
|
// http://en.cppreference.com/w/cpp/utility/optional
|
|
// Chromium documentation:
|
|
// https://chromium.googlesource.com/chromium/src/+/master/docs/optional.md
|
|
//
|
|
// These are the differences between the specification and the implementation:
|
|
// - Constructors do not use 'constexpr' as it is a C++14 extension.
|
|
// - 'constexpr' might be missing in some places for reasons specified locally.
|
|
// - No exceptions are thrown, because they are banned from Chromium.
|
|
// Marked noexcept for only move constructor and move assign operators.
|
|
// - All the non-members are in the 'base' namespace instead of 'std'.
|
|
//
|
|
// Note that T cannot have a constructor T(Optional<T>) etc. Optional<T>
|
|
// PERFETTO_CHECKs T's constructor (specifically via IsConvertibleFromOptional),
|
|
// and in the PERFETTO_CHECK whether T can be constructible from Optional<T>,
|
|
// which is recursive so it does not work. As of Feb 2018, std::optional C++17
|
|
// implementation in both clang and gcc has same limitation. MSVC SFINAE looks
|
|
// to have different behavior, but anyway it reports an error, too.
|
|
//
|
|
// This file is a modified version of optional.h from Chromium at revision
|
|
// 5e71bd454e60511c1293c0c686544aaa76094424. The changes remove C++14/C++17
|
|
// specific code and replace with C++11 counterparts.
|
|
template <typename T>
|
|
class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
|
|
: public internal::OptionalBase<T>,
|
|
public internal::CopyConstructible<std::is_copy_constructible<T>::value>,
|
|
public internal::MoveConstructible<std::is_move_constructible<T>::value>,
|
|
public internal::CopyAssignable<std::is_copy_constructible<T>::value &&
|
|
std::is_copy_assignable<T>::value>,
|
|
public internal::MoveAssignable<std::is_move_constructible<T>::value &&
|
|
std::is_move_assignable<T>::value> {
|
|
public:
|
|
#undef OPTIONAL_DECLSPEC_EMPTY_BASES
|
|
using value_type = T;
|
|
|
|
// Defer default/copy/move constructor implementation to OptionalBase.
|
|
constexpr Optional() = default;
|
|
constexpr Optional(const Optional& other) = default;
|
|
constexpr Optional(Optional&& other) noexcept(
|
|
std::is_nothrow_move_constructible<T>::value) = default;
|
|
|
|
constexpr Optional(nullopt_t) {} // NOLINT(runtime/explicit)
|
|
|
|
// Converting copy constructor. "explicit" only if
|
|
// std::is_convertible<const U&, T>::value is false. It is implemented by
|
|
// declaring two almost same constructors, but that condition in enable_if_t
|
|
// is different, so that either one is chosen, thanks to SFINAE.
|
|
template <typename U,
|
|
typename std::enable_if<
|
|
std::is_constructible<T, const U&>::value &&
|
|
!internal::IsConvertibleFromOptional<T, U>::value &&
|
|
std::is_convertible<const U&, T>::value,
|
|
bool>::type = false>
|
|
Optional(const Optional<U>& other) : internal::OptionalBase<T>(other) {}
|
|
|
|
template <typename U,
|
|
typename std::enable_if<
|
|
std::is_constructible<T, const U&>::value &&
|
|
!internal::IsConvertibleFromOptional<T, U>::value &&
|
|
!std::is_convertible<const U&, T>::value,
|
|
bool>::type = false>
|
|
explicit Optional(const Optional<U>& other)
|
|
: internal::OptionalBase<T>(other) {}
|
|
|
|
// Converting move constructor. Similar to converting copy constructor,
|
|
// declaring two (explicit and non-explicit) constructors.
|
|
template <typename U,
|
|
typename std::enable_if<
|
|
std::is_constructible<T, U&&>::value &&
|
|
!internal::IsConvertibleFromOptional<T, U>::value &&
|
|
std::is_convertible<U&&, T>::value,
|
|
bool>::type = false>
|
|
Optional(Optional<U>&& other) : internal::OptionalBase<T>(std::move(other)) {}
|
|
|
|
template <typename U,
|
|
typename std::enable_if<
|
|
std::is_constructible<T, U&&>::value &&
|
|
!internal::IsConvertibleFromOptional<T, U>::value &&
|
|
!std::is_convertible<U&&, T>::value,
|
|
bool>::type = false>
|
|
explicit Optional(Optional<U>&& other)
|
|
: internal::OptionalBase<T>(std::move(other)) {}
|
|
|
|
template <class... Args>
|
|
constexpr explicit Optional(in_place_t, Args&&... args)
|
|
: internal::OptionalBase<T>(in_place, std::forward<Args>(args)...) {}
|
|
|
|
template <class U,
|
|
class... Args,
|
|
class = typename std::enable_if<
|
|
std::is_constructible<value_type,
|
|
std::initializer_list<U>&,
|
|
Args...>::value>::type>
|
|
constexpr explicit Optional(in_place_t,
|
|
std::initializer_list<U> il,
|
|
Args&&... args)
|
|
: internal::OptionalBase<T>(in_place, il, std::forward<Args>(args)...) {}
|
|
|
|
// Forward value constructor. Similar to converting constructors,
|
|
// conditionally explicit.
|
|
template <
|
|
typename U = value_type,
|
|
typename std::enable_if<
|
|
std::is_constructible<T, U&&>::value &&
|
|
!std::is_same<internal::RemoveCvRefT<U>, in_place_t>::value &&
|
|
!std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
|
|
std::is_convertible<U&&, T>::value,
|
|
bool>::type = false>
|
|
constexpr Optional(U&& value)
|
|
: internal::OptionalBase<T>(in_place, std::forward<U>(value)) {}
|
|
|
|
template <
|
|
typename U = value_type,
|
|
typename std::enable_if<
|
|
std::is_constructible<T, U&&>::value &&
|
|
!std::is_same<internal::RemoveCvRefT<U>, in_place_t>::value &&
|
|
!std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
|
|
!std::is_convertible<U&&, T>::value,
|
|
bool>::type = false>
|
|
constexpr explicit Optional(U&& value)
|
|
: internal::OptionalBase<T>(in_place, std::forward<U>(value)) {}
|
|
|
|
~Optional() = default;
|
|
|
|
// Defer copy-/move- assign operator implementation to OptionalBase.
|
|
Optional& operator=(const Optional& other) = default;
|
|
Optional& operator=(Optional&& other) noexcept(
|
|
std::is_nothrow_move_assignable<T>::value&&
|
|
std::is_nothrow_move_constructible<T>::value) = default;
|
|
|
|
Optional& operator=(nullopt_t) {
|
|
FreeIfNeeded();
|
|
return *this;
|
|
}
|
|
|
|
// Perfect-forwarded assignment.
|
|
template <typename U>
|
|
typename std::enable_if<
|
|
!std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
|
|
std::is_constructible<T, U>::value &&
|
|
std::is_assignable<T&, U>::value &&
|
|
(!std::is_scalar<T>::value ||
|
|
!std::is_same<typename std::decay<U>::type, T>::value),
|
|
Optional&>::type
|
|
operator=(U&& value) {
|
|
InitOrAssign(std::forward<U>(value));
|
|
return *this;
|
|
}
|
|
|
|
// Copy assign the state of other.
|
|
template <typename U>
|
|
typename std::enable_if<!internal::IsAssignableFromOptional<T, U>::value &&
|
|
std::is_constructible<T, const U&>::value &&
|
|
std::is_assignable<T&, const U&>::value,
|
|
Optional&>::type
|
|
operator=(const Optional<U>& other) {
|
|
CopyAssign(other);
|
|
return *this;
|
|
}
|
|
|
|
// Move assign the state of other.
|
|
template <typename U>
|
|
typename std::enable_if<!internal::IsAssignableFromOptional<T, U>::value &&
|
|
std::is_constructible<T, U>::value &&
|
|
std::is_assignable<T&, U>::value,
|
|
Optional&>::type
|
|
operator=(Optional<U>&& other) {
|
|
MoveAssign(std::move(other));
|
|
return *this;
|
|
}
|
|
|
|
const T* operator->() const {
|
|
PERFETTO_DCHECK(storage_.is_populated_);
|
|
return &storage_.value_;
|
|
}
|
|
|
|
T* operator->() {
|
|
PERFETTO_DCHECK(storage_.is_populated_);
|
|
return &storage_.value_;
|
|
}
|
|
|
|
const T& operator*() const& {
|
|
PERFETTO_DCHECK(storage_.is_populated_);
|
|
return storage_.value_;
|
|
}
|
|
|
|
T& operator*() & {
|
|
PERFETTO_DCHECK(storage_.is_populated_);
|
|
return storage_.value_;
|
|
}
|
|
|
|
const T&& operator*() const&& {
|
|
PERFETTO_DCHECK(storage_.is_populated_);
|
|
return std::move(storage_.value_);
|
|
}
|
|
|
|
T&& operator*() && {
|
|
PERFETTO_DCHECK(storage_.is_populated_);
|
|
return std::move(storage_.value_);
|
|
}
|
|
|
|
constexpr explicit operator bool() const { return storage_.is_populated_; }
|
|
|
|
constexpr bool has_value() const { return storage_.is_populated_; }
|
|
|
|
T& value() & {
|
|
PERFETTO_CHECK(storage_.is_populated_);
|
|
return storage_.value_;
|
|
}
|
|
|
|
const T& value() const& {
|
|
PERFETTO_CHECK(storage_.is_populated_);
|
|
return storage_.value_;
|
|
}
|
|
|
|
T&& value() && {
|
|
PERFETTO_CHECK(storage_.is_populated_);
|
|
return std::move(storage_.value_);
|
|
}
|
|
|
|
const T&& value() const&& {
|
|
PERFETTO_CHECK(storage_.is_populated_);
|
|
return std::move(storage_.value_);
|
|
}
|
|
|
|
template <class U>
|
|
constexpr T value_or(U&& default_value) const& {
|
|
static_assert(std::is_convertible<U, T>::value,
|
|
"U must be convertible to T");
|
|
return storage_.is_populated_
|
|
? storage_.value_
|
|
: static_cast<T>(std::forward<U>(default_value));
|
|
}
|
|
|
|
template <class U>
|
|
T value_or(U&& default_value) && {
|
|
static_assert(std::is_convertible<U, T>::value,
|
|
"U must be convertible to T");
|
|
return storage_.is_populated_
|
|
? std::move(storage_.value_)
|
|
: static_cast<T>(std::forward<U>(default_value));
|
|
}
|
|
|
|
void swap(Optional& other) {
|
|
if (!storage_.is_populated_ && !other.storage_.is_populated_)
|
|
return;
|
|
|
|
if (storage_.is_populated_ != other.storage_.is_populated_) {
|
|
if (storage_.is_populated_) {
|
|
other.storage_.Init(std::move(storage_.value_));
|
|
FreeIfNeeded();
|
|
} else {
|
|
storage_.Init(std::move(other.storage_.value_));
|
|
other.FreeIfNeeded();
|
|
}
|
|
return;
|
|
}
|
|
|
|
PERFETTO_DCHECK(storage_.is_populated_ && other.storage_.is_populated_);
|
|
using std::swap;
|
|
swap(**this, *other);
|
|
}
|
|
|
|
void reset() { FreeIfNeeded(); }
|
|
|
|
template <class... Args>
|
|
T& emplace(Args&&... args) {
|
|
FreeIfNeeded();
|
|
storage_.Init(std::forward<Args>(args)...);
|
|
return storage_.value_;
|
|
}
|
|
|
|
template <class U, class... Args>
|
|
typename std::enable_if<
|
|
std::is_constructible<T, std::initializer_list<U>&, Args&&...>::value,
|
|
T&>::type
|
|
emplace(std::initializer_list<U> il, Args&&... args) {
|
|
FreeIfNeeded();
|
|
storage_.Init(il, std::forward<Args>(args)...);
|
|
return storage_.value_;
|
|
}
|
|
|
|
private:
|
|
// Accessing template base class's protected member needs explicit
|
|
// declaration to do so.
|
|
using internal::OptionalBase<T>::CopyAssign;
|
|
using internal::OptionalBase<T>::FreeIfNeeded;
|
|
using internal::OptionalBase<T>::InitOrAssign;
|
|
using internal::OptionalBase<T>::MoveAssign;
|
|
using internal::OptionalBase<T>::storage_;
|
|
};
|
|
|
|
// Here after defines comparation operators. The definition follows
|
|
// http://en.cppreference.com/w/cpp/utility/optional/operator_cmp
|
|
// while bool() casting is replaced by has_value() to meet the chromium
|
|
// style guide.
|
|
template <class T, class U>
|
|
bool operator==(const Optional<T>& lhs, const Optional<U>& rhs) {
|
|
if (lhs.has_value() != rhs.has_value())
|
|
return false;
|
|
if (!lhs.has_value())
|
|
return true;
|
|
return *lhs == *rhs;
|
|
}
|
|
|
|
template <class T, class U>
|
|
bool operator!=(const Optional<T>& lhs, const Optional<U>& rhs) {
|
|
if (lhs.has_value() != rhs.has_value())
|
|
return true;
|
|
if (!lhs.has_value())
|
|
return false;
|
|
return *lhs != *rhs;
|
|
}
|
|
|
|
template <class T, class U>
|
|
bool operator<(const Optional<T>& lhs, const Optional<U>& rhs) {
|
|
if (!rhs.has_value())
|
|
return false;
|
|
if (!lhs.has_value())
|
|
return true;
|
|
return *lhs < *rhs;
|
|
}
|
|
|
|
template <class T, class U>
|
|
bool operator<=(const Optional<T>& lhs, const Optional<U>& rhs) {
|
|
if (!lhs.has_value())
|
|
return true;
|
|
if (!rhs.has_value())
|
|
return false;
|
|
return *lhs <= *rhs;
|
|
}
|
|
|
|
template <class T, class U>
|
|
bool operator>(const Optional<T>& lhs, const Optional<U>& rhs) {
|
|
if (!lhs.has_value())
|
|
return false;
|
|
if (!rhs.has_value())
|
|
return true;
|
|
return *lhs > *rhs;
|
|
}
|
|
|
|
template <class T, class U>
|
|
bool operator>=(const Optional<T>& lhs, const Optional<U>& rhs) {
|
|
if (!rhs.has_value())
|
|
return true;
|
|
if (!lhs.has_value())
|
|
return false;
|
|
return *lhs >= *rhs;
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator==(const Optional<T>& opt, nullopt_t) {
|
|
return !opt;
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator==(nullopt_t, const Optional<T>& opt) {
|
|
return !opt;
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator!=(const Optional<T>& opt, nullopt_t) {
|
|
return opt.has_value();
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator!=(nullopt_t, const Optional<T>& opt) {
|
|
return opt.has_value();
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator<(const Optional<T>&, nullopt_t) {
|
|
return false;
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator<(nullopt_t, const Optional<T>& opt) {
|
|
return opt.has_value();
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator<=(const Optional<T>& opt, nullopt_t) {
|
|
return !opt;
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator<=(nullopt_t, const Optional<T>&) {
|
|
return true;
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator>(const Optional<T>& opt, nullopt_t) {
|
|
return opt.has_value();
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator>(nullopt_t, const Optional<T>&) {
|
|
return false;
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator>=(const Optional<T>&, nullopt_t) {
|
|
return true;
|
|
}
|
|
|
|
template <class T>
|
|
constexpr bool operator>=(nullopt_t, const Optional<T>& opt) {
|
|
return !opt;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator==(const Optional<T>& opt, const U& value) {
|
|
return opt.has_value() ? *opt == value : false;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator==(const U& value, const Optional<T>& opt) {
|
|
return opt.has_value() ? value == *opt : false;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator!=(const Optional<T>& opt, const U& value) {
|
|
return opt.has_value() ? *opt != value : true;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator!=(const U& value, const Optional<T>& opt) {
|
|
return opt.has_value() ? value != *opt : true;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator<(const Optional<T>& opt, const U& value) {
|
|
return opt.has_value() ? *opt < value : true;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator<(const U& value, const Optional<T>& opt) {
|
|
return opt.has_value() ? value < *opt : false;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator<=(const Optional<T>& opt, const U& value) {
|
|
return opt.has_value() ? *opt <= value : true;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator<=(const U& value, const Optional<T>& opt) {
|
|
return opt.has_value() ? value <= *opt : false;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator>(const Optional<T>& opt, const U& value) {
|
|
return opt.has_value() ? *opt > value : false;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator>(const U& value, const Optional<T>& opt) {
|
|
return opt.has_value() ? value > *opt : true;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator>=(const Optional<T>& opt, const U& value) {
|
|
return opt.has_value() ? *opt >= value : false;
|
|
}
|
|
|
|
template <class T, class U>
|
|
constexpr bool operator>=(const U& value, const Optional<T>& opt) {
|
|
return opt.has_value() ? value >= *opt : true;
|
|
}
|
|
|
|
template <class T>
|
|
constexpr Optional<typename std::decay<T>::type> make_optional(T&& value) {
|
|
return Optional<typename std::decay<T>::type>(std::forward<T>(value));
|
|
}
|
|
|
|
template <class T, class... Args>
|
|
constexpr Optional<T> make_optional(Args&&... args) {
|
|
return Optional<T>(in_place, std::forward<Args>(args)...);
|
|
}
|
|
|
|
template <class T, class U, class... Args>
|
|
constexpr Optional<T> make_optional(std::initializer_list<U> il,
|
|
Args&&... args) {
|
|
return Optional<T>(in_place, il, std::forward<Args>(args)...);
|
|
}
|
|
|
|
// Partial specialization for a function template is not allowed. Also, it is
|
|
// not allowed to add overload function to std namespace, while it is allowed
|
|
// to specialize the template in std. Thus, swap() (kind of) overloading is
|
|
// defined in base namespace, instead.
|
|
template <class T>
|
|
typename std::enable_if<std::is_move_constructible<T>::value &&
|
|
internal::IsSwappable<T>::value>::type
|
|
swap(Optional<T>& lhs, Optional<T>& rhs) {
|
|
lhs.swap(rhs);
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
namespace std {
|
|
|
|
template <class T>
|
|
struct hash<perfetto::base::Optional<T>> {
|
|
size_t operator()(const perfetto::base::Optional<T>& opt) const {
|
|
return opt == perfetto::base::nullopt ? 0 : std::hash<T>()(*opt);
|
|
}
|
|
};
|
|
|
|
} // namespace std
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_OPTIONAL_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/string_view.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/hash.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_HASH_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_HASH_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
#include <type_traits>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A helper class which computes a 64-bit hash of the input data.
|
|
// The algorithm used is FNV-1a as it is fast and easy to implement and has
|
|
// relatively few collisions.
|
|
// WARNING: This hash function should not be used for any cryptographic purpose.
|
|
class Hash {
|
|
public:
|
|
// Creates an empty hash object
|
|
Hash() {}
|
|
|
|
// Hashes a numeric value.
|
|
template <typename T,
|
|
typename std::enable_if<std::is_arithmetic<T>::value>* = nullptr>
|
|
void Update(T data) {
|
|
Update(reinterpret_cast<const char*>(&data), sizeof(data));
|
|
}
|
|
|
|
// Hashes a byte array.
|
|
void Update(const char* data, size_t size) {
|
|
for (size_t i = 0; i < size; i++) {
|
|
result_ ^= static_cast<uint8_t>(data[i]);
|
|
result_ *= kFnv1a64Prime;
|
|
}
|
|
}
|
|
|
|
uint64_t digest() { return result_; }
|
|
|
|
private:
|
|
static constexpr uint64_t kFnv1a64OffsetBasis = 0xcbf29ce484222325;
|
|
static constexpr uint64_t kFnv1a64Prime = 0x100000001b3;
|
|
|
|
uint64_t result_ = kFnv1a64OffsetBasis;
|
|
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_HASH_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_H_
|
|
|
|
#include <string.h>
|
|
|
|
#include <algorithm>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A string-like object that refers to a non-owned piece of memory.
|
|
// Strings are internally NOT null terminated.
|
|
class StringView {
|
|
public:
|
|
static constexpr size_t npos = static_cast<size_t>(-1);
|
|
|
|
StringView() : data_(nullptr), size_(0) {}
|
|
StringView(const StringView&) = default;
|
|
StringView& operator=(const StringView&) = default;
|
|
StringView(const char* data, size_t size) : data_(data), size_(size) {
|
|
PERFETTO_DCHECK(size == 0 || data != nullptr);
|
|
}
|
|
|
|
// Allow implicit conversion from any class that has a |data| and |size| field
|
|
// and has the kConvertibleToStringView trait (e.g., protozero::ConstChars).
|
|
template <typename T, typename = std::enable_if<T::kConvertibleToStringView>>
|
|
StringView(const T& x) : StringView(x.data, x.size) {
|
|
PERFETTO_DCHECK(x.size == 0 || x.data != nullptr);
|
|
}
|
|
|
|
// Creates a StringView from a null-terminated C string.
|
|
// Deliberately not "explicit".
|
|
StringView(const char* cstr) : data_(cstr), size_(strlen(cstr)) {
|
|
PERFETTO_DCHECK(cstr != nullptr);
|
|
}
|
|
|
|
// This instead has to be explicit, as creating a StringView out of a
|
|
// std::string can be subtle.
|
|
explicit StringView(const std::string& str)
|
|
: data_(str.data()), size_(str.size()) {}
|
|
|
|
bool empty() const { return size_ == 0; }
|
|
size_t size() const { return size_; }
|
|
const char* data() const { return data_; }
|
|
const char* begin() const { return data_; }
|
|
const char* end() const { return data_ + size_; }
|
|
|
|
char at(size_t pos) const {
|
|
PERFETTO_DCHECK(pos < size_);
|
|
return data_[pos];
|
|
}
|
|
|
|
size_t find(char c, size_t start_pos = 0) const {
|
|
for (size_t i = start_pos; i < size_; ++i) {
|
|
if (data_[i] == c)
|
|
return i;
|
|
}
|
|
return npos;
|
|
}
|
|
|
|
size_t find(const StringView& str, size_t start_pos = 0) const {
|
|
if (start_pos > size())
|
|
return npos;
|
|
auto it = std::search(begin() + start_pos, end(), str.begin(), str.end());
|
|
size_t pos = static_cast<size_t>(it - begin());
|
|
return pos + str.size() <= size() ? pos : npos;
|
|
}
|
|
|
|
size_t find(const char* str, size_t start_pos = 0) const {
|
|
return find(StringView(str), start_pos);
|
|
}
|
|
|
|
size_t rfind(char c) const {
|
|
for (size_t i = size_; i > 0; --i) {
|
|
if (data_[i - 1] == c)
|
|
return i - 1;
|
|
}
|
|
return npos;
|
|
}
|
|
|
|
StringView substr(size_t pos, size_t count = npos) const {
|
|
if (pos >= size_)
|
|
return StringView("", 0);
|
|
size_t rcount = std::min(count, size_ - pos);
|
|
return StringView(data_ + pos, rcount);
|
|
}
|
|
|
|
bool CaseInsensitiveEq(const StringView& other) {
|
|
if (size() != other.size())
|
|
return false;
|
|
if (size() == 0)
|
|
return true;
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
return _strnicmp(data(), other.data(), size()) == 0;
|
|
#else
|
|
return strncasecmp(data(), other.data(), size()) == 0;
|
|
#endif
|
|
}
|
|
|
|
std::string ToStdString() const {
|
|
return data_ == nullptr ? "" : std::string(data_, size_);
|
|
}
|
|
|
|
uint64_t Hash() const {
|
|
base::Hash hasher;
|
|
hasher.Update(data_, size_);
|
|
return hasher.digest();
|
|
}
|
|
|
|
private:
|
|
const char* data_ = nullptr;
|
|
size_t size_ = 0;
|
|
};
|
|
|
|
inline bool operator==(const StringView& x, const StringView& y) {
|
|
if (x.size() != y.size())
|
|
return false;
|
|
if (x.size() == 0)
|
|
return true;
|
|
return memcmp(x.data(), y.data(), x.size()) == 0;
|
|
}
|
|
|
|
inline bool operator!=(const StringView& x, const StringView& y) {
|
|
return !(x == y);
|
|
}
|
|
|
|
inline bool operator<(const StringView& x, const StringView& y) {
|
|
auto size = std::min(x.size(), y.size());
|
|
if (size == 0)
|
|
return x.size() < y.size();
|
|
int result = memcmp(x.data(), y.data(), size);
|
|
return result < 0 || (result == 0 && x.size() < y.size());
|
|
}
|
|
|
|
inline bool operator>=(const StringView& x, const StringView& y) {
|
|
return !(x < y);
|
|
}
|
|
|
|
inline bool operator>(const StringView& x, const StringView& y) {
|
|
return y < x;
|
|
}
|
|
|
|
inline bool operator<=(const StringView& x, const StringView& y) {
|
|
return !(y < x);
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
namespace std {
|
|
|
|
template <>
|
|
struct hash<::perfetto::base::StringView> {
|
|
size_t operator()(const ::perfetto::base::StringView& sv) const {
|
|
return static_cast<size_t>(sv.Hash());
|
|
}
|
|
};
|
|
|
|
} // namespace std
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_STRING_UTILS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_STRING_UTILS_H_
|
|
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
#include <inttypes.h>
|
|
#include <stdlib.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/optional.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
inline char Lowercase(char c) {
|
|
return ('A' <= c && c <= 'Z') ? static_cast<char>(c - ('A' - 'a')) : c;
|
|
}
|
|
|
|
inline char Uppercase(char c) {
|
|
return ('a' <= c && c <= 'z') ? static_cast<char>(c + ('A' - 'a')) : c;
|
|
}
|
|
|
|
inline Optional<uint32_t> CStringToUInt32(const char* s, int base = 10) {
|
|
char* endptr = nullptr;
|
|
auto value = static_cast<uint32_t>(strtoul(s, &endptr, base));
|
|
return (*s && !*endptr) ? base::make_optional(value) : base::nullopt;
|
|
}
|
|
|
|
inline Optional<int32_t> CStringToInt32(const char* s, int base = 10) {
|
|
char* endptr = nullptr;
|
|
auto value = static_cast<int32_t>(strtol(s, &endptr, base));
|
|
return (*s && !*endptr) ? base::make_optional(value) : base::nullopt;
|
|
}
|
|
|
|
// Note: it saturates to 7fffffffffffffff if parsing a hex number >= 0x8000...
|
|
inline Optional<int64_t> CStringToInt64(const char* s, int base = 10) {
|
|
char* endptr = nullptr;
|
|
auto value = static_cast<int64_t>(strtoll(s, &endptr, base));
|
|
return (*s && !*endptr) ? base::make_optional(value) : base::nullopt;
|
|
}
|
|
|
|
inline Optional<uint64_t> CStringToUInt64(const char* s, int base = 10) {
|
|
char* endptr = nullptr;
|
|
auto value = static_cast<uint64_t>(strtoull(s, &endptr, base));
|
|
return (*s && !*endptr) ? base::make_optional(value) : base::nullopt;
|
|
}
|
|
|
|
inline Optional<double> CStringToDouble(const char* s) {
|
|
char* endptr = nullptr;
|
|
double value = strtod(s, &endptr);
|
|
Optional<double> result(base::nullopt);
|
|
if (*s != '\0' && *endptr == '\0')
|
|
result = value;
|
|
return result;
|
|
}
|
|
|
|
inline Optional<uint32_t> StringToUInt32(const std::string& s, int base = 10) {
|
|
return CStringToUInt32(s.c_str(), base);
|
|
}
|
|
|
|
inline Optional<int32_t> StringToInt32(const std::string& s, int base = 10) {
|
|
return CStringToInt32(s.c_str(), base);
|
|
}
|
|
|
|
inline Optional<uint64_t> StringToUInt64(const std::string& s, int base = 10) {
|
|
return CStringToUInt64(s.c_str(), base);
|
|
}
|
|
|
|
inline Optional<int64_t> StringToInt64(const std::string& s, int base = 10) {
|
|
return CStringToInt64(s.c_str(), base);
|
|
}
|
|
|
|
inline Optional<double> StringToDouble(const std::string& s) {
|
|
return CStringToDouble(s.c_str());
|
|
}
|
|
|
|
bool StartsWith(const std::string& str, const std::string& prefix);
|
|
bool EndsWith(const std::string& str, const std::string& suffix);
|
|
bool Contains(const std::string& haystack, const std::string& needle);
|
|
size_t Find(const StringView& needle, const StringView& haystack);
|
|
bool CaseInsensitiveEqual(const std::string& first, const std::string& second);
|
|
std::string Join(const std::vector<std::string>& parts,
|
|
const std::string& delim);
|
|
std::vector<std::string> SplitString(const std::string& text,
|
|
const std::string& delimiter);
|
|
std::string StripPrefix(const std::string& str, const std::string& prefix);
|
|
std::string StripSuffix(const std::string& str, const std::string& suffix);
|
|
std::string ToLower(const std::string& str);
|
|
std::string ToUpper(const std::string& str);
|
|
std::string StripChars(const std::string& str,
|
|
const std::string& chars,
|
|
char replacement);
|
|
std::string ToHex(const char* data, size_t size);
|
|
inline std::string ToHex(const std::string& s) {
|
|
return ToHex(s.c_str(), s.size());
|
|
}
|
|
std::string IntToHexString(uint32_t number);
|
|
std::string ReplaceAll(std::string str,
|
|
const std::string& to_replace,
|
|
const std::string& replacement);
|
|
std::string TrimLeading(const std::string& str);
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_STRING_UTILS_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
|
|
#include <string.h>
|
|
|
|
#include <algorithm>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
bool StartsWith(const std::string& str, const std::string& prefix) {
|
|
return str.compare(0, prefix.length(), prefix) == 0;
|
|
}
|
|
|
|
bool EndsWith(const std::string& str, const std::string& suffix) {
|
|
if (suffix.size() > str.size())
|
|
return false;
|
|
return str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
|
|
}
|
|
|
|
bool Contains(const std::string& haystack, const std::string& needle) {
|
|
return haystack.find(needle) != std::string::npos;
|
|
}
|
|
|
|
size_t Find(const StringView& needle, const StringView& haystack) {
|
|
if (needle.size() == 0)
|
|
return 0;
|
|
if (needle.size() > haystack.size())
|
|
return std::string::npos;
|
|
for (size_t i = 0; i < haystack.size() - (needle.size() - 1); ++i) {
|
|
if (strncmp(haystack.data() + i, needle.data(), needle.size()) == 0)
|
|
return i;
|
|
}
|
|
return std::string::npos;
|
|
}
|
|
|
|
bool CaseInsensitiveEqual(const std::string& first, const std::string& second) {
|
|
return first.size() == second.size() &&
|
|
std::equal(
|
|
first.begin(), first.end(), second.begin(),
|
|
[](char a, char b) { return Lowercase(a) == Lowercase(b); });
|
|
}
|
|
|
|
std::string Join(const std::vector<std::string>& parts,
|
|
const std::string& delim) {
|
|
std::string acc;
|
|
for (size_t i = 0; i < parts.size(); ++i) {
|
|
acc += parts[i];
|
|
if (i + 1 != parts.size()) {
|
|
acc += delim;
|
|
}
|
|
}
|
|
return acc;
|
|
}
|
|
|
|
std::vector<std::string> SplitString(const std::string& text,
|
|
const std::string& delimiter) {
|
|
PERFETTO_CHECK(!delimiter.empty());
|
|
|
|
std::vector<std::string> output;
|
|
size_t start = 0;
|
|
size_t next;
|
|
for (;;) {
|
|
next = std::min(text.find(delimiter, start), text.size());
|
|
if (next > start)
|
|
output.emplace_back(&text[start], next - start);
|
|
start = next + delimiter.size();
|
|
if (start >= text.size())
|
|
break;
|
|
}
|
|
return output;
|
|
}
|
|
|
|
std::string StripPrefix(const std::string& str, const std::string& prefix) {
|
|
return StartsWith(str, prefix) ? str.substr(prefix.size()) : str;
|
|
}
|
|
|
|
std::string StripSuffix(const std::string& str, const std::string& suffix) {
|
|
return EndsWith(str, suffix) ? str.substr(0, str.size() - suffix.size())
|
|
: str;
|
|
}
|
|
|
|
std::string ToUpper(const std::string& str) {
|
|
// Don't use toupper(), it depends on the locale.
|
|
std::string res(str);
|
|
auto end = res.end();
|
|
for (auto c = res.begin(); c != end; ++c)
|
|
*c = Uppercase(*c);
|
|
return res;
|
|
}
|
|
|
|
std::string ToLower(const std::string& str) {
|
|
// Don't use tolower(), it depends on the locale.
|
|
std::string res(str);
|
|
auto end = res.end();
|
|
for (auto c = res.begin(); c != end; ++c)
|
|
*c = Lowercase(*c);
|
|
return res;
|
|
}
|
|
|
|
std::string ToHex(const char* data, size_t size) {
|
|
std::string hex(2 * size + 1, 'x');
|
|
for (size_t i = 0; i < size; ++i) {
|
|
// snprintf prints 3 characters, the two hex digits and a null byte. As we
|
|
// write left to right, we keep overwriting the nullbytes, except for the
|
|
// last call to snprintf.
|
|
snprintf(&(hex[2 * i]), 3, "%02hhx", data[i]);
|
|
}
|
|
// Remove the trailing nullbyte produced by the last snprintf.
|
|
hex.resize(2 * size);
|
|
return hex;
|
|
}
|
|
|
|
std::string IntToHexString(uint32_t number) {
|
|
size_t max_size = 11; // Max uint32 is 0xFFFFFFFF + 1 for null byte.
|
|
std::string buf;
|
|
buf.resize(max_size);
|
|
auto final_size = snprintf(&buf[0], max_size, "0x%02x", number);
|
|
PERFETTO_DCHECK(final_size >= 0);
|
|
buf.resize(static_cast<size_t>(final_size)); // Cuts off the final null byte.
|
|
return buf;
|
|
}
|
|
|
|
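// Returns a copy of |str| where every character that appears in |chars| has
// been replaced with |replacement|.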
std::string StripChars(const std::string& str,
|
|
const std::string& chars,
|
|
char replacement) {
|
|
std::string res(str);
|
|
const char* start = res.c_str();
|
|
const char* remove = chars.c_str();
|
|
for (const char* c = strpbrk(start, remove); c; c = strpbrk(c + 1, remove))
|
|
res[static_cast<uintptr_t>(c - start)] = replacement;
|
|
return res;
|
|
}
|
|
|
|
std::string ReplaceAll(std::string str,
|
|
const std::string& to_replace,
|
|
const std::string& replacement) {
|
|
PERFETTO_CHECK(!to_replace.empty());
|
|
size_t pos = 0;
|
|
while ((pos = str.find(to_replace, pos)) != std::string::npos) {
|
|
str.replace(pos, to_replace.length(), replacement);
|
|
pos += replacement.length();
|
|
}
|
|
return str;
|
|
}
|
|
|
|
std::string TrimLeading(const std::string& str) {
|
|
size_t idx = str.find_first_not_of(' ');
|
|
return idx == std::string::npos ? str : str.substr(idx);
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/string_view.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// static
|
|
constexpr size_t StringView::npos;
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/subprocess.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/subprocess.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/pipe.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_PIPE_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_PIPE_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
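// Thin RAII wrapper around a pair of pipe file descriptors (see pipe(2)).
// Create() returns a connected pair: |rd| is the read end and |wr| the write
// end, each closed automatically when the owning ScopedFile is destroyed.
// The Flags argument controls which ends are put into non-blocking mode.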
class Pipe {
|
|
public:
|
|
enum Flags {
|
|
kBothBlock = 0,
|
|
kBothNonBlock,
|
|
kRdNonBlock,
|
|
kWrNonBlock,
|
|
};
|
|
|
|
static Pipe Create(Flags = kBothBlock);
|
|
|
|
Pipe();
|
|
Pipe(Pipe&&) noexcept;
|
|
Pipe& operator=(Pipe&&);
|
|
|
|
ScopedFile rd;
|
|
ScopedFile wr;
|
|
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_PIPE_H_
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_SUBPROCESS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_SUBPROCESS_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
// This is a #if as opposed to a GN condition, because GN conditions aren't propagated when
|
|
// translating to Bazel or other build systems, as they get resolved at translation time. Without
|
|
// this, the Bazel build breaks on Windows.
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#define PERFETTO_HAS_SUBPROCESS() 1
|
|
#else
|
|
#define PERFETTO_HAS_SUBPROCESS() 0
|
|
#endif
|
|
|
|
#include <functional>
|
|
#include <initializer_list>
|
|
#include <string>
|
|
#include <thread>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/proc_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Handles creation and lifecycle management of subprocesses, taking care of
|
|
// all subtleties involved in handling processes on UNIX.
|
|
// This class deals with two main use-cases:
|
|
// 1) fork() + exec() equivalent: for spawning a brand new process image.
|
|
// This happens when |args.exec_cmd| is not empty.
|
|
// This is safe to use even in a multi-threaded environment.
|
|
// 2) fork(): for spawning a process and running a function.
|
|
// This happens when |args.entrypoint_for_testing| is not empty.
|
|
// This is intended only for tests as it is extremely subtle.
|
|
// This mode must be used with extreme care. Before the entrypoint is
|
|
// invoked all file descriptors other than stdin/out/err and the ones
|
|
// specified in |args.preserve_fds| will be closed, to avoid each process
|
|
// retaining a dupe of other subprocesses pipes. This however means that
|
|
// any non trivial calls (including logging) must be avoided as they might
|
|
// refer to FDs that are now closed. The entrypoint should really be used
|
|
// just to signal a pipe or similar for synchronizing sequencing in tests.
|
|
|
|
//
|
|
// This class allows controlling stdin/out/err pipe redirection and takes care
|
|
// of keeping all the pipes pumped (stdin) / drained (stdout/err), in a similar
|
|
// fashion to Python's subprocess.communicate().
|
|
// stdin: is always piped and closed once the |args.input| buffer is written.
|
|
// stdout/err can be either:
|
|
// - dup()ed onto the parent process stdout/err.
|
|
// - redirected onto /dev/null.
|
|
// - piped onto a buffer (see output() method). There is only one output
|
|
// buffer in total. If both stdout and stderr are set to kBuffer mode, they
|
|
// will be merged into the same buffer. There doesn't seem to be a use case
|
|
// where they are needed separately.
|
|
//
|
|
// Some caveats worth mentioning:
|
|
// - It always waitpid()s, to avoid leaving zombies around. If the process is
|
|
// not terminated by the time the destructor is reached, the dtor will
|
|
// send a SIGKILL and wait for the termination.
|
|
// - After fork()-ing it will close all file descriptors, preserving only
|
|
// stdin/out/err and the fds listed in |args.preserve_fds|.
|
|
// - On Linux/Android, the child process will be SIGKILL-ed if the calling
|
|
// thread exits, even if the Subprocess is std::move()-d onto another thread.
|
|
// This happens by virtue of PR_SET_PDEATHSIG, which is used to avoid leaking
|
|
// child processes in the case of a crash of the parent (frequent
|
|
// in tests). However, the child process might still be leaked if execing
|
|
// a setuid/setgid binary (see man 2 prctl).
|
|
//
|
|
// Usage:
|
|
// base::Subprocess p({"/bin/cat", "-"});
|
|
// (or equivalently:
|
|
// base::Subprocess p;
|
|
// p.args.exec_cmd.push_back("/bin/cat");
|
|
// p.args.exec_cmd.push_back("-");
|
|
// )
|
|
// p.args.stdout_mode = base::Subprocess::kBuffer;
|
|
// p.args.stderr_mode = base::Subprocess::kInherit;
|
|
// p.args.input = "stdin contents";
|
|
// p.Call();
|
|
// (or equivalently:
|
|
// p.Start();
|
|
// p.Wait();
|
|
// )
|
|
// EXPECT_EQ(p.status(), base::Subprocess::kExited);
|
|
// EXPECT_EQ(p.returncode(), 0);
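//
// An illustrative sketch of the fork()-only mode used in tests (the
// sync_pipe handshake below is a made-up example, not prescribed by this
// API):
//   base::Pipe sync_pipe = base::Pipe::Create();
//   base::Subprocess p;
//   p.args.preserve_fds.push_back(*sync_pipe.wr);
//   p.args.entrypoint_for_testing = [&] {
//     // Runs in the child after fork(). Keep this trivial: only signal the
//     // parent through the preserved pipe fd.
//     base::ignore_result(write(*sync_pipe.wr, "1", 1));
//   };
//   p.Start();
//   // The parent can now block on read(*sync_pipe.rd, ...) to sequence
//   // against the child.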
|
|
class Subprocess {
|
|
public:
|
|
enum Status {
|
|
kNotStarted = 0, // Before calling Start() or Call().
|
|
kRunning, // After calling Start(), before Wait().
|
|
    kExited,          // The subprocess exited (either successfully or not).
|
|
kKilledBySignal, // The subprocess has been killed by a signal.
|
|
};
|
|
|
|
enum OutputMode {
|
|
    kInherit = 0,  // Inherits the caller process's stdout/stderr.
|
|
kDevNull, // dup() onto /dev/null
|
|
kBuffer, // dup() onto a pipe and move it into the output() buffer.
|
|
kFd, // dup() onto the passed args.fd.
|
|
};
|
|
|
|
// Input arguments for configuring the subprocess behavior.
|
|
struct Args {
|
|
Args(std::initializer_list<std::string> _cmd = {}) : exec_cmd(_cmd) {}
|
|
Args(Args&&) noexcept;
|
|
Args& operator=(Args&&);
|
|
// If non-empty this will cause an exec() when Start()/Call() are called.
|
|
std::vector<std::string> exec_cmd;
|
|
|
|
// If non-empty, it changes the argv[0] argument passed to exec. If
|
|
// unset, argv[0] == exec_cmd[0]. This is to handle cases like:
|
|
// exec_cmd = {"/proc/self/exec"}, argv0: "my_custom_test_override".
|
|
std::string argv0_override;
|
|
|
|
// If non-empty this will be invoked on the fork()-ed child process, after
|
|
// stdin/out/err has been redirected and all other file descriptor are
|
|
// closed.
|
|
// It is valid to specify both |exec_cmd| AND |entrypoint_for_testing|.
|
|
// In this case |entrypoint_for_testing| will be invoked just before the
|
|
    // exec() call, but after having closed all fds except stdin/out/err.
|
|
// This is for synchronization barriers in tests.
|
|
std::function<void()> entrypoint_for_testing;
|
|
|
|
// If non-empty, replaces the environment passed to exec().
|
|
std::vector<std::string> env;
|
|
|
|
// The file descriptors in this list will not be closed.
|
|
std::vector<int> preserve_fds;
|
|
|
|
// The data to push in the child process stdin.
|
|
std::string input;
|
|
|
|
OutputMode stdout_mode = kInherit;
|
|
OutputMode stderr_mode = kInherit;
|
|
|
|
base::ScopedFile out_fd;
|
|
|
|
// Returns " ".join(exec_cmd), quoting arguments.
|
|
std::string GetCmdString() const;
|
|
};
|
|
|
|
struct ResourceUsage {
|
|
uint32_t cpu_utime_ms = 0;
|
|
uint32_t cpu_stime_ms = 0;
|
|
uint32_t max_rss_kb = 0;
|
|
uint32_t min_page_faults = 0;
|
|
uint32_t maj_page_faults = 0;
|
|
uint32_t vol_ctx_switch = 0;
|
|
uint32_t invol_ctx_switch = 0;
|
|
|
|
uint32_t cpu_time_ms() const { return cpu_utime_ms + cpu_stime_ms; }
|
|
};
|
|
|
|
explicit Subprocess(std::initializer_list<std::string> exec_cmd = {});
|
|
Subprocess(Subprocess&&) noexcept;
|
|
Subprocess& operator=(Subprocess&&);
|
|
~Subprocess(); // It will KillAndWaitForTermination() if still alive.
|
|
|
|
// Starts the subprocess but doesn't wait for its termination. The caller
|
|
// is expected to either call Wait() or Poll() after this call.
|
|
void Start();
|
|
|
|
// Wait for process termination. Can be called more than once.
|
|
// Args:
|
|
// |timeout_ms| = 0: wait indefinitely.
|
|
// |timeout_ms| > 0: wait for at most |timeout_ms|.
|
|
// Returns:
|
|
// True: The process terminated. See status() and returncode().
|
|
// False: Timeout reached, the process is still running. In this case the
|
|
// process will be left in the kRunning state.
|
|
bool Wait(int timeout_ms = 0);
|
|
|
|
// Equivalent of Start() + Wait();
|
|
// Returns true if the process exited cleanly with return code 0. False in
|
|
  // any other case.
|
|
bool Call(int timeout_ms = 0);
|
|
|
|
Status Poll();
|
|
|
|
  // Sends a signal (SIGKILL if not specified) and waits for process termination.
|
|
void KillAndWaitForTermination(int sig_num = 0);
|
|
|
|
PlatformProcessId pid() const { return s_.pid; }
|
|
|
|
// The accessors below are updated only after a call to Poll(), Wait() or
|
|
// KillAndWaitForTermination().
|
|
// In most cases you want to call Poll() rather than these accessors.
|
|
|
|
Status status() const { return s_.status; }
|
|
int returncode() const { return s_.returncode; }
|
|
|
|
// This contains both stdout and stderr (if the corresponding _mode ==
|
|
// kBuffer). It's non-const so the caller can std::move() it.
|
|
std::string& output() { return s_.output; }
|
|
const ResourceUsage& rusage() const { return *s_.rusage; }
|
|
|
|
Args args;
|
|
|
|
private:
|
|
Subprocess(const Subprocess&) = delete;
|
|
Subprocess& operator=(const Subprocess&) = delete;
|
|
void TryPushStdin();
|
|
void TryReadStdoutAndErr();
|
|
void TryReadExitStatus();
|
|
void KillAtMostOnce();
|
|
bool PollInternal(int poll_timeout_ms);
|
|
|
|
// This is to deal robustly with the move operators, without having to
|
|
// manually maintain member-wise move instructions.
|
|
struct MovableState {
|
|
base::Pipe stdin_pipe;
|
|
base::Pipe stdouterr_pipe;
|
|
base::Pipe exit_status_pipe;
|
|
PlatformProcessId pid;
|
|
size_t input_written = 0;
|
|
Status status = kNotStarted;
|
|
int returncode = -1;
|
|
    std::string output;  // Stdout+stderr. Only when kBuffer.
|
|
std::thread waitpid_thread;
|
|
std::unique_ptr<ResourceUsage> rusage;
|
|
};
|
|
|
|
MovableState s_;
|
|
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_SUBPROCESS_H_
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/subprocess.h"
|
|
|
|
#if PERFETTO_HAS_SUBPROCESS()
|
|
|
|
#include <poll.h>
|
|
#include <signal.h>
|
|
#include <stdio.h>
|
|
#include <sys/resource.h>
|
|
#include <sys/types.h>
|
|
#include <sys/wait.h>
|
|
#include <unistd.h>
|
|
|
|
#include <algorithm>
|
|
#include <thread>
|
|
#include <tuple>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
#include <sys/prctl.h>
|
|
#endif
|
|
|
|
// On macOS this is not declared in any header.
|
|
extern "C" char** environ;
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace {
|
|
|
|
struct ChildProcessArgs {
|
|
Subprocess::Args* create_args;
|
|
const char* exec_cmd = nullptr;
|
|
std::vector<char*> argv;
|
|
std::vector<char*> env;
|
|
int stdin_pipe_rd = -1;
|
|
int stdouterr_pipe_wr = -1;
|
|
};
|
|
|
|
// Don't add any dynamic allocation in this function. This will be invoked
|
|
// under a fork(), potentially in a state where the allocator lock is held.
|
|
void __attribute__((noreturn)) ChildProcess(ChildProcessArgs* args) {
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
  // In no case do we want a child process to outlive its parent process. This is
|
|
// relevant for tests, so that a test failure/crash doesn't leave child
|
|
// processes around that get reparented to init.
|
|
prctl(PR_SET_PDEATHSIG, SIGKILL);
|
|
#endif
|
|
|
|
auto die = [args](const char* err) __attribute__((noreturn)) {
|
|
base::ignore_result(write(args->stdouterr_pipe_wr, err, strlen(err)));
|
|
base::ignore_result(write(args->stdouterr_pipe_wr, "\n", 1));
|
|
// From https://www.gnu.org/software/libc/manual/html_node/Exit-Status.html
|
|
// "In particular, the value 128 is used to indicate failure to execute
|
|
// another program in a subprocess. This convention is not universally
|
|
// obeyed, but it is a good idea to follow it in your programs."
|
|
_exit(128);
|
|
};
|
|
|
|
auto set_fd_close_on_exec = [&die](int fd, bool close_on_exec) {
|
|
int flags = fcntl(fd, F_GETFD, 0);
|
|
if (flags < 0)
|
|
die("fcntl(F_GETFD) failed");
|
|
flags = close_on_exec ? (flags | FD_CLOEXEC) : (flags & ~FD_CLOEXEC);
|
|
if (fcntl(fd, F_SETFD, flags) < 0)
|
|
die("fcntl(F_SETFD) failed");
|
|
};
|
|
|
|
if (getppid() == 1)
|
|
die("terminating because parent process died");
|
|
|
|
if (dup2(args->stdin_pipe_rd, STDIN_FILENO) == -1)
|
|
die("Failed to dup2(STDIN)");
|
|
close(args->stdin_pipe_rd);
|
|
|
|
switch (args->create_args->stdout_mode) {
|
|
case Subprocess::kInherit:
|
|
break;
|
|
case Subprocess::kDevNull: {
|
|
if (dup2(open("/dev/null", O_RDWR), STDOUT_FILENO) == -1)
|
|
die("Failed to dup2(STDOUT)");
|
|
break;
|
|
}
|
|
case Subprocess::kBuffer:
|
|
if (dup2(args->stdouterr_pipe_wr, STDOUT_FILENO) == -1)
|
|
die("Failed to dup2(STDOUT)");
|
|
break;
|
|
case Subprocess::kFd:
|
|
if (dup2(*args->create_args->out_fd, STDOUT_FILENO) == -1)
|
|
die("Failed to dup2(STDOUT)");
|
|
break;
|
|
}
|
|
|
|
switch (args->create_args->stderr_mode) {
|
|
case Subprocess::kInherit:
|
|
break;
|
|
case Subprocess::kDevNull: {
|
|
if (dup2(open("/dev/null", O_RDWR), STDERR_FILENO) == -1)
|
|
die("Failed to dup2(STDERR)");
|
|
break;
|
|
}
|
|
case Subprocess::kBuffer:
|
|
if (dup2(args->stdouterr_pipe_wr, STDERR_FILENO) == -1)
|
|
die("Failed to dup2(STDERR)");
|
|
break;
|
|
case Subprocess::kFd:
|
|
if (dup2(*args->create_args->out_fd, STDERR_FILENO) == -1)
|
|
die("Failed to dup2(STDERR)");
|
|
break;
|
|
}
|
|
|
|
  // Close all FDs except stdin/out/err and the ones that the client explicitly
|
|
// asked to retain. The reason for this is twofold:
|
|
// 1. For exec-only (i.e. entrypoint == empty) cases: it avoids leaking FDs
|
|
// that didn't get marked as O_CLOEXEC by accident.
|
|
// 2. In fork() mode (entrypoint not empty) avoids retaining a dup of eventfds
|
|
  //    that would prevent the parent process from receiving EOFs (tests usually use
|
|
// pipes as a synchronization mechanism between subprocesses).
|
|
const auto& preserve_fds = args->create_args->preserve_fds;
|
|
for (int i = 0; i < 512; i++) {
|
|
if (i != STDIN_FILENO && i != STDERR_FILENO && i != STDOUT_FILENO &&
|
|
i != args->stdouterr_pipe_wr &&
|
|
!std::count(preserve_fds.begin(), preserve_fds.end(), i)) {
|
|
close(i);
|
|
}
|
|
}
|
|
|
|
  // Clears FD_CLOEXEC from stdin/out/err. These are the only FDs that we want
|
|
// to be preserved after the exec().
|
|
set_fd_close_on_exec(STDIN_FILENO, false);
|
|
set_fd_close_on_exec(STDOUT_FILENO, false);
|
|
set_fd_close_on_exec(STDERR_FILENO, false);
|
|
|
|
// If the caller specified a std::function entrypoint, run that first.
|
|
if (args->create_args->entrypoint_for_testing)
|
|
args->create_args->entrypoint_for_testing();
|
|
|
|
// If the caller specified only an entrypoint, without any args, exit now.
|
|
// Otherwise proceed with the exec() below.
|
|
if (!args->exec_cmd)
|
|
_exit(0);
|
|
|
|
  // If |args[0]| contains a '/', use execve() (which takes a path). Otherwise
|
|
  // use execvp(), which searches the directories listed in PATH.
|
|
if (strchr(args->exec_cmd, '/')) {
|
|
char** env = args->env.empty() ? environ : args->env.data();
|
|
execve(args->exec_cmd, args->argv.data(), env);
|
|
} else {
|
|
// There is no execvpe() on Mac.
|
|
if (!args->env.empty())
|
|
die("A full path is required for |exec_cmd| when setting |env|");
|
|
execvp(args->exec_cmd, args->argv.data());
|
|
}
|
|
|
|
  // Reached only if the exec() call fails.
|
|
die("execve() failed");
|
|
}
|
|
|
|
} // namespace
|
|
|
|
Subprocess::Args::Args(Args&&) noexcept = default;
|
|
Subprocess::Args& Subprocess::Args::operator=(Args&&) = default;
|
|
|
|
Subprocess::Subprocess(std::initializer_list<std::string> a) : args(a) {
|
|
s_.rusage.reset(new ResourceUsage());
|
|
}
|
|
|
|
Subprocess::Subprocess(Subprocess&& other) noexcept {
|
|
static_assert(sizeof(Subprocess) == sizeof(std::tuple<MovableState, Args>),
|
|
"base::Subprocess' move ctor needs updating");
|
|
s_ = std::move(other.s_);
|
|
args = std::move(other.args);
|
|
|
|
// Reset the state of the moved-from object.
|
|
other.s_.status = kNotStarted; // So the dtor doesn't try to kill().
|
|
other.~Subprocess();
|
|
new (&other) Subprocess();
|
|
}
|
|
|
|
Subprocess& Subprocess::operator=(Subprocess&& other) {
|
|
this->~Subprocess();
|
|
new (this) Subprocess(std::move(other));
|
|
return *this;
|
|
}
|
|
|
|
Subprocess::~Subprocess() {
|
|
if (s_.status == kRunning)
|
|
KillAndWaitForTermination();
|
|
PERFETTO_CHECK(!s_.waitpid_thread.joinable());
|
|
}
|
|
|
|
void Subprocess::Start() {
|
|
ChildProcessArgs proc_args;
|
|
proc_args.create_args = &args;
|
|
|
|
// Setup argv.
|
|
if (!args.exec_cmd.empty()) {
|
|
proc_args.exec_cmd = args.exec_cmd[0].c_str();
|
|
for (const std::string& arg : args.exec_cmd)
|
|
proc_args.argv.push_back(const_cast<char*>(arg.c_str()));
|
|
proc_args.argv.push_back(nullptr);
|
|
|
|
if (!args.argv0_override.empty())
|
|
proc_args.argv[0] = const_cast<char*>(args.argv0_override.c_str());
|
|
}
|
|
|
|
// Setup env.
|
|
if (!args.env.empty()) {
|
|
for (const std::string& str : args.env)
|
|
proc_args.env.push_back(const_cast<char*>(str.c_str()));
|
|
proc_args.env.push_back(nullptr);
|
|
}
|
|
|
|
// Setup the pipes for stdin/err redirection.
|
|
s_.stdin_pipe = base::Pipe::Create(base::Pipe::kWrNonBlock);
|
|
proc_args.stdin_pipe_rd = *s_.stdin_pipe.rd;
|
|
s_.stdouterr_pipe = base::Pipe::Create(base::Pipe::kRdNonBlock);
|
|
proc_args.stdouterr_pipe_wr = *s_.stdouterr_pipe.wr;
|
|
|
|
// Spawn the child process that will exec().
|
|
s_.pid = fork();
|
|
PERFETTO_CHECK(s_.pid >= 0);
|
|
if (s_.pid == 0) {
|
|
// Close the parent-ends of the pipes.
|
|
s_.stdin_pipe.wr.reset();
|
|
s_.stdouterr_pipe.rd.reset();
|
|
ChildProcess(&proc_args);
|
|
// ChildProcess() doesn't return, not even in case of failures.
|
|
PERFETTO_FATAL("not reached");
|
|
}
|
|
|
|
s_.status = kRunning;
|
|
|
|
// Close the child-end of the pipes.
|
|
// Deliberately NOT closing the s_.stdin_pipe.rd. This is to avoid crashing
|
|
// with a SIGPIPE if the process exits without consuming its stdin, while
|
|
// the parent tries to write() on the other end of the stdin pipe.
|
|
s_.stdouterr_pipe.wr.reset();
|
|
|
|
// Spawn a thread that is blocked on waitpid() and writes the termination
|
|
  // status onto a pipe. The problem here is that waitpid() doesn't have a
|
|
// timeout option and can't be passed to poll(). The alternative would be
|
|
// using a SIGCHLD handler, but anecdotally signal handlers introduce more
|
|
  // problems than they solve.
|
|
s_.exit_status_pipe = base::Pipe::Create(base::Pipe::kRdNonBlock);
|
|
|
|
// Both ends of the pipe are closed after the thread.join().
|
|
int pid = s_.pid;
|
|
int exit_status_pipe_wr = s_.exit_status_pipe.wr.release();
|
|
auto* rusage = s_.rusage.get();
|
|
s_.waitpid_thread = std::thread([pid, exit_status_pipe_wr, rusage] {
|
|
int pid_stat = -1;
|
|
struct rusage usg {};
|
|
int wait_res = PERFETTO_EINTR(wait4(pid, &pid_stat, 0, &usg));
|
|
PERFETTO_CHECK(wait_res == pid);
|
|
|
|
auto tv_to_ms = [](const struct timeval& tv) {
|
|
return static_cast<uint32_t>(tv.tv_sec * 1000 + tv.tv_usec / 1000);
|
|
};
|
|
rusage->cpu_utime_ms = tv_to_ms(usg.ru_utime);
|
|
rusage->cpu_stime_ms = tv_to_ms(usg.ru_stime);
|
|
rusage->max_rss_kb = static_cast<uint32_t>(usg.ru_maxrss) / 1000;
|
|
rusage->min_page_faults = static_cast<uint32_t>(usg.ru_minflt);
|
|
rusage->maj_page_faults = static_cast<uint32_t>(usg.ru_majflt);
|
|
rusage->vol_ctx_switch = static_cast<uint32_t>(usg.ru_nvcsw);
|
|
rusage->invol_ctx_switch = static_cast<uint32_t>(usg.ru_nivcsw);
|
|
|
|
base::ignore_result(PERFETTO_EINTR(
|
|
write(exit_status_pipe_wr, &pid_stat, sizeof(pid_stat))));
|
|
PERFETTO_CHECK(close(exit_status_pipe_wr) == 0 || errno == EINTR);
|
|
});
|
|
}
|
|
|
|
Subprocess::Status Subprocess::Poll() {
|
|
if (s_.status != kRunning)
|
|
return s_.status; // Nothing to poll.
|
|
while (PollInternal(0 /* don't block*/)) {
|
|
}
|
|
return s_.status;
|
|
}
|
|
|
|
// |timeout_ms| semantic:
|
|
// -1: Block indefinitely.
|
|
// 0: Don't block, return immediately.
|
|
// >0: Block for at most X ms.
|
|
// Returns:
|
|
// True: Read at least one fd (so there might be more queued).
|
|
// False: all fds are quiescent (no data to read/write).
|
|
bool Subprocess::PollInternal(int poll_timeout_ms) {
|
|
struct pollfd fds[3]{};
|
|
size_t num_fds = 0;
|
|
if (s_.exit_status_pipe.rd) {
|
|
fds[num_fds].fd = *s_.exit_status_pipe.rd;
|
|
fds[num_fds].events = POLLIN;
|
|
num_fds++;
|
|
}
|
|
if (s_.stdouterr_pipe.rd) {
|
|
fds[num_fds].fd = *s_.stdouterr_pipe.rd;
|
|
fds[num_fds].events = POLLIN;
|
|
num_fds++;
|
|
}
|
|
if (s_.stdin_pipe.wr) {
|
|
fds[num_fds].fd = *s_.stdin_pipe.wr;
|
|
fds[num_fds].events = POLLOUT;
|
|
num_fds++;
|
|
}
|
|
|
|
if (num_fds == 0)
|
|
return false;
|
|
|
|
auto nfds = static_cast<nfds_t>(num_fds);
|
|
int poll_res = PERFETTO_EINTR(poll(fds, nfds, poll_timeout_ms));
|
|
PERFETTO_CHECK(poll_res >= 0);
|
|
|
|
TryReadStdoutAndErr();
|
|
TryPushStdin();
|
|
TryReadExitStatus();
|
|
|
|
return poll_res > 0;
|
|
}
|
|
|
|
bool Subprocess::Wait(int timeout_ms) {
|
|
PERFETTO_CHECK(s_.status != kNotStarted);
|
|
|
|
// Break out of the loop only after both conditions are satisfied:
|
|
// - All stdout/stderr data has been read (if kBuffer).
|
|
// - The process exited.
|
|
  // Note that the two events can happen in arbitrary order. After the process
|
|
// exits, there might be still data in the pipe buffer, which we want to
|
|
// read fully.
|
|
//
|
|
// Instead, don't wait on the stdin to be fully written. The child process
|
|
// might exit prematurely (or crash). If that happens, we can end up in a
|
|
// state where the write(stdin_pipe_.wr) will never unblock.
|
|
|
|
const int64_t t_start = base::GetWallTimeMs().count();
|
|
while (s_.exit_status_pipe.rd || s_.stdouterr_pipe.rd) {
|
|
int poll_timeout_ms = -1; // Block until a FD is ready.
|
|
if (timeout_ms > 0) {
|
|
const int64_t now = GetWallTimeMs().count();
|
|
poll_timeout_ms = timeout_ms - static_cast<int>(now - t_start);
|
|
if (poll_timeout_ms <= 0)
|
|
return false;
|
|
}
|
|
PollInternal(poll_timeout_ms);
|
|
} // while(...)
|
|
return true;
|
|
}
|
|
|
|
bool Subprocess::Call(int timeout_ms) {
|
|
PERFETTO_CHECK(s_.status == kNotStarted);
|
|
Start();
|
|
|
|
if (!Wait(timeout_ms)) {
|
|
KillAndWaitForTermination();
|
|
// TryReadExitStatus must have joined the thread.
|
|
PERFETTO_DCHECK(!s_.waitpid_thread.joinable());
|
|
}
|
|
PERFETTO_DCHECK(s_.status != kRunning);
|
|
return s_.status == kExited && s_.returncode == 0;
|
|
}
|
|
|
|
void Subprocess::TryReadExitStatus() {
|
|
if (!s_.exit_status_pipe.rd)
|
|
return;
|
|
|
|
int pid_stat = -1;
|
|
int64_t rsize = PERFETTO_EINTR(
|
|
read(*s_.exit_status_pipe.rd, &pid_stat, sizeof(pid_stat)));
|
|
if (rsize < 0 && errno == EAGAIN)
|
|
return;
|
|
|
|
if (rsize > 0) {
|
|
PERFETTO_CHECK(rsize == sizeof(pid_stat));
|
|
} else if (rsize < 0) {
|
|
PERFETTO_PLOG("Subprocess read(s_.exit_status_pipe) failed");
|
|
}
|
|
s_.waitpid_thread.join();
|
|
s_.exit_status_pipe.rd.reset();
|
|
|
|
if (WIFEXITED(pid_stat)) {
|
|
s_.returncode = WEXITSTATUS(pid_stat);
|
|
s_.status = kExited;
|
|
} else if (WIFSIGNALED(pid_stat)) {
|
|
s_.returncode = 128 + WTERMSIG(pid_stat); // Follow bash convention.
|
|
s_.status = kKilledBySignal;
|
|
} else {
|
|
PERFETTO_FATAL("waitpid() returned an unexpected value (0x%x)", pid_stat);
|
|
}
|
|
}
|
|
|
|
// If the stdin pipe is still open, push input data and close it at the end.
|
|
void Subprocess::TryPushStdin() {
|
|
if (!s_.stdin_pipe.wr)
|
|
return;
|
|
|
|
PERFETTO_DCHECK(args.input.empty() || s_.input_written < args.input.size());
|
|
if (args.input.size()) {
|
|
int64_t wsize =
|
|
PERFETTO_EINTR(write(*s_.stdin_pipe.wr, &args.input[s_.input_written],
|
|
args.input.size() - s_.input_written));
|
|
if (wsize < 0 && errno == EAGAIN)
|
|
return;
|
|
|
|
if (wsize >= 0) {
|
|
// Whether write() can return 0 is one of the greatest mysteries of UNIX.
|
|
// Just ignore it.
|
|
s_.input_written += static_cast<size_t>(wsize);
|
|
} else {
|
|
PERFETTO_PLOG("Subprocess write(stdin) failed");
|
|
s_.stdin_pipe.wr.reset();
|
|
}
|
|
}
|
|
PERFETTO_DCHECK(s_.input_written <= args.input.size());
|
|
if (s_.input_written == args.input.size())
|
|
s_.stdin_pipe.wr.reset(); // Close stdin.
|
|
}
|
|
|
|
void Subprocess::TryReadStdoutAndErr() {
|
|
if (!s_.stdouterr_pipe.rd)
|
|
return;
|
|
char buf[4096];
|
|
int64_t rsize = PERFETTO_EINTR(read(*s_.stdouterr_pipe.rd, buf, sizeof(buf)));
|
|
if (rsize < 0 && errno == EAGAIN)
|
|
return;
|
|
|
|
if (rsize > 0) {
|
|
s_.output.append(buf, static_cast<size_t>(rsize));
|
|
} else if (rsize == 0 /* EOF */) {
|
|
s_.stdouterr_pipe.rd.reset();
|
|
} else {
|
|
PERFETTO_PLOG("Subprocess read(stdout/err) failed");
|
|
s_.stdouterr_pipe.rd.reset();
|
|
}
|
|
}
|
|
|
|
void Subprocess::KillAndWaitForTermination(int sig_num) {
|
|
kill(s_.pid, sig_num ? sig_num : SIGKILL);
|
|
Wait();
|
|
}
|
|
|
|
std::string Subprocess::Args::GetCmdString() const {
|
|
std::string str;
|
|
for (size_t i = 0; i < exec_cmd.size(); i++) {
|
|
str += i > 0 ? " \"" : "";
|
|
str += exec_cmd[i];
|
|
str += i > 0 ? "\"" : "";
|
|
}
|
|
return str;
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_HAS_SUBPROCESS()
|
|
// gen_amalgamated begin source: src/base/thread_checker.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/thread_checker.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_CHECKER_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_THREAD_CHECKER_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <pthread.h>
|
|
#endif
|
|
#include <atomic>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
using ThreadID = unsigned long;
|
|
#else
|
|
using ThreadID = pthread_t;
|
|
#endif
|
|
|
|
class PERFETTO_EXPORT ThreadChecker {
|
|
public:
|
|
ThreadChecker();
|
|
~ThreadChecker();
|
|
ThreadChecker(const ThreadChecker&);
|
|
ThreadChecker& operator=(const ThreadChecker&);
|
|
bool CalledOnValidThread() const PERFETTO_WARN_UNUSED_RESULT;
|
|
void DetachFromThread();
|
|
|
|
private:
|
|
mutable std::atomic<ThreadID> thread_id_;
|
|
};
|
|
|
|
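// Usage sketch (MyDataSource and OnSetup() are made-up names):
//   class MyDataSource {
//    public:
//     void OnSetup() { PERFETTO_DCHECK_THREAD(thread_checker_); }
//
//    private:
//     PERFETTO_THREAD_CHECKER(thread_checker_)
//   };
// The macros below compile to no-ops when PERFETTO_DCHECK_IS_ON() is false.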
#if PERFETTO_DCHECK_IS_ON() && !PERFETTO_BUILDFLAG(PERFETTO_CHROMIUM_BUILD)
|
|
// TODO(primiano) Use Chromium's thread checker in Chromium.
|
|
#define PERFETTO_THREAD_CHECKER(name) base::ThreadChecker name;
|
|
#define PERFETTO_DCHECK_THREAD(name) \
|
|
PERFETTO_DCHECK((name).CalledOnValidThread())
|
|
#define PERFETTO_DETACH_FROM_THREAD(name) (name).DetachFromThread()
|
|
#else
|
|
#define PERFETTO_THREAD_CHECKER(name)
|
|
#define PERFETTO_DCHECK_THREAD(name)
|
|
#define PERFETTO_DETACH_FROM_THREAD(name)
|
|
#endif // PERFETTO_DCHECK_IS_ON()
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_THREAD_CHECKER_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace {
|
|
constexpr ThreadID kDetached{};
|
|
|
|
ThreadID CurrentThreadId() {
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
return ::GetCurrentThreadId();
|
|
#else
|
|
return pthread_self();
|
|
#endif
|
|
}
|
|
} // namespace
|
|
|
|
ThreadChecker::ThreadChecker() {
|
|
thread_id_.store(CurrentThreadId());
|
|
}
|
|
|
|
ThreadChecker::~ThreadChecker() = default;
|
|
|
|
ThreadChecker::ThreadChecker(const ThreadChecker& other) {
|
|
thread_id_ = other.thread_id_.load();
|
|
}
|
|
|
|
ThreadChecker& ThreadChecker::operator=(const ThreadChecker& other) {
|
|
thread_id_ = other.thread_id_.load();
|
|
return *this;
|
|
}
|
|
|
|
bool ThreadChecker::CalledOnValidThread() const {
|
|
auto self = CurrentThreadId();
|
|
|
|
// Will re-attach if previously detached using DetachFromThread().
|
|
auto prev_value = kDetached;
|
|
if (thread_id_.compare_exchange_strong(prev_value, self))
|
|
return true;
|
|
return prev_value == self;
|
|
}
|
|
|
|
void ThreadChecker::DetachFromThread() {
|
|
thread_id_.store(kDetached);
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/time.cc
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#else
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
TimeNanos GetWallTimeNs() {
|
|
LARGE_INTEGER freq;
|
|
::QueryPerformanceFrequency(&freq);
|
|
LARGE_INTEGER counter;
|
|
::QueryPerformanceCounter(&counter);
|
|
double elapsed_nanoseconds = (1e9 * counter.QuadPart) / freq.QuadPart;
|
|
return TimeNanos(static_cast<uint64_t>(elapsed_nanoseconds));
|
|
}
|
|
|
|
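// GetThreadTimes() reports kernel/user CPU time as FILETIMEs, i.e. counts of
// 100-nanosecond intervals, hence the multiplication by 100 below to convert
// the result to nanoseconds.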
TimeNanos GetThreadCPUTimeNs() {
|
|
FILETIME dummy, kernel_ftime, user_ftime;
|
|
::GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &kernel_ftime,
|
|
&user_ftime);
|
|
uint64_t kernel_time = kernel_ftime.dwHighDateTime * 0x100000000 +
|
|
kernel_ftime.dwLowDateTime;
|
|
uint64_t user_time = user_ftime.dwHighDateTime * 0x100000000 +
|
|
user_ftime.dwLowDateTime;
|
|
|
|
return TimeNanos((kernel_time + user_time) * 100);
|
|
}
|
|
|
|
void SleepMicroseconds(unsigned interval_us) {
|
|
// The Windows Sleep function takes a millisecond count. Round up so that
|
|
// short sleeps don't turn into a busy wait. Note that the sleep granularity
|
|
// on Windows can dynamically vary from 1 ms to ~16 ms, so don't count on this
|
|
// being a short sleep.
|
|
::Sleep(static_cast<DWORD>((interval_us + 999) / 1000));
|
|
}
|
|
|
|
#else // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
void SleepMicroseconds(unsigned interval_us) {
|
|
::usleep(static_cast<useconds_t>(interval_us));
|
|
}
|
|
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/uuid.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/uuid.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_UUID_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_UUID_H_
|
|
|
|
#include <array>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/optional.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
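// A 16-byte (128-bit) UUID. The first 8 bytes of the backing array hold the
// value returned by lsb(), the last 8 bytes the value returned by msb(); both
// halves are copied byte-for-byte via memcpy, so their interpretation follows
// the host endianness.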
class Uuid {
|
|
public:
|
|
explicit Uuid(const std::string& s);
|
|
explicit Uuid(int64_t lsb, int64_t msb);
|
|
Uuid();
|
|
|
|
std::array<uint8_t, 16>* data() { return &data_; }
|
|
const std::array<uint8_t, 16>* data() const { return &data_; }
|
|
|
|
bool operator==(const Uuid& other) const { return data_ == other.data_; }
|
|
|
|
bool operator!=(const Uuid& other) const { return !(*this == other); }
|
|
|
|
int64_t msb() const {
|
|
int64_t result;
|
|
memcpy(&result, data_.data() + 8, 8);
|
|
return result;
|
|
}
|
|
|
|
int64_t lsb() const {
|
|
int64_t result;
|
|
memcpy(&result, data_.data(), 8);
|
|
return result;
|
|
}
|
|
|
|
void set_lsb_msb(int64_t lsb, int64_t msb) {
|
|
set_lsb(lsb);
|
|
set_msb(msb);
|
|
}
|
|
void set_msb(int64_t msb) { memcpy(data_.data() + 8, &msb, 8); }
|
|
void set_lsb(int64_t lsb) { memcpy(data_.data(), &lsb, 8); }
|
|
|
|
std::string ToString() const;
|
|
std::string ToPrettyString() const;
|
|
|
|
private:
|
|
std::array<uint8_t, 16> data_{};
|
|
};
|
|
|
|
Uuid Uuidv4();
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_UUID_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/uuid.h"
|
|
|
|
#include <random>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
namespace {
|
|
|
|
constexpr char kHexmap[] = {'0', '1', '2', '3', '4', '5', '6', '7',
|
|
'8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
|
|
} // namespace
|
|
|
|
// See https://www.ietf.org/rfc/rfc4122.txt
|
|
Uuid Uuidv4() {
|
|
static std::minstd_rand rng(static_cast<uint32_t>(GetBootTimeNs().count()));
|
|
Uuid uuid;
|
|
auto& data = *uuid.data();
|
|
|
|
for (size_t i = 0; i < 16; ++i)
|
|
data[i] = static_cast<uint8_t>(rng());
|
|
|
|
  // version: the 4 most significant bits of byte 6 are set to 0100 (UUID v4).
|
|
data[6] = (data[6] & 0x0f) | 0x40;
|
|
  // clock_seq_hi_and_reserved: the 2 most significant bits of byte 8 are set to 10.
|
|
data[8] = (data[8] & 0x3f) | 0x80;
|
|
|
|
return uuid;
|
|
}
|
|
|
|
Uuid::Uuid() {}
|
|
|
|
Uuid::Uuid(const std::string& s) {
|
|
PERFETTO_CHECK(s.size() == data_.size());
|
|
memcpy(data_.data(), s.data(), s.size());
|
|
}
|
|
|
|
Uuid::Uuid(int64_t lsb, int64_t msb) {
|
|
set_lsb_msb(lsb, msb);
|
|
}
|
|
|
|
std::string Uuid::ToString() const {
|
|
return std::string(reinterpret_cast<const char*>(data_.data()), data_.size());
|
|
}
|
|
|
|
std::string Uuid::ToPrettyString() const {
|
|
std::string s(data_.size() * 2 + 4, '-');
|
|
// Format is 123e4567-e89b-12d3-a456-426655443322.
|
|
size_t j = 0;
|
|
for (size_t i = 0; i < data_.size(); ++i) {
|
|
if (i == 4 || i == 6 || i == 8 || i == 10)
|
|
j++;
|
|
s[2 * i + j] = kHexmap[(data_[data_.size() - i - 1] & 0xf0) >> 4];
|
|
s[2 * i + 1 + j] = kHexmap[(data_[data_.size() - i - 1] & 0x0f)];
|
|
}
|
|
return s;
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/virtual_destructors.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
|
|
// This translation unit contains the definitions for the destructors of pure
|
|
// virtual interfaces for the current build target. The alternative would be
|
|
// introducing a one-liner .cc file for each pure virtual interface, which is
|
|
// overkill. This is for compliance with -Wweak-vtables.
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
TaskRunner::~TaskRunner() = default;
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/waitable_event.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/waitable_event.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_WAITABLE_EVENT_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_WAITABLE_EVENT_H_
|
|
|
|
#include <condition_variable>
|
|
#include <mutex>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A waitable event for cross-thread synchronization.
|
|
// All methods on this class can be called from any thread.
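// Usage sketch (MyBackgroundWork() is a hypothetical function):
//   base::WaitableEvent evt;
//   std::thread worker([&evt] {
//     MyBackgroundWork();
//     evt.Notify();  // Wakes up any thread blocked in Wait().
//   });
//   evt.Wait();  // Blocks until Notify() has been called at least once.
//   worker.join();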
|
|
class WaitableEvent {
|
|
public:
|
|
WaitableEvent();
|
|
~WaitableEvent();
|
|
WaitableEvent(const WaitableEvent&) = delete;
|
|
WaitableEvent operator=(const WaitableEvent&) = delete;
|
|
|
|
// Synchronously block until the event is notified.
|
|
void Wait();
|
|
|
|
// Signal the event, waking up blocked waiters.
|
|
void Notify();
|
|
|
|
private:
|
|
std::mutex mutex_;
|
|
std::condition_variable event_;
|
|
bool notified_ = false;
|
|
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_WAITABLE_EVENT_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/waitable_event.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
WaitableEvent::WaitableEvent() = default;
|
|
WaitableEvent::~WaitableEvent() = default;
|
|
|
|
void WaitableEvent::Wait() {
|
|
std::unique_lock<std::mutex> lock(mutex_);
|
|
return event_.wait(lock, [this] { return notified_; });
|
|
}
|
|
|
|
void WaitableEvent::Notify() {
|
|
std::unique_lock<std::mutex> lock(mutex_);
|
|
notified_ = true;
|
|
event_.notify_all();
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/watchdog_posix.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/watchdog.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/watchdog_noop.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_NOOP_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_NOOP_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
class Watchdog {
|
|
public:
|
|
class Timer {
|
|
public:
|
|
// Define an empty dtor to avoid "unused variable" errors on the call site.
|
|
Timer() {}
|
|
Timer(const Timer&) {}
|
|
~Timer() {}
|
|
};
|
|
static Watchdog* GetInstance() {
|
|
static Watchdog* watchdog = new Watchdog();
|
|
return watchdog;
|
|
}
|
|
Timer CreateFatalTimer(uint32_t /*ms*/) { return Timer(); }
|
|
void Start() {}
|
|
void SetMemoryLimit(uint64_t /*bytes*/, uint32_t /*window_ms*/) {}
|
|
void SetCpuLimit(uint32_t /*percentage*/, uint32_t /*window_ms*/) {}
|
|
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_NOOP_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_H_
|
|
|
|
#include <functional>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
// The POSIX watchdog is only supported on Linux and Android in non-embedder
|
|
// builds.
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog_posix.h"
|
|
#else
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog_noop.h"
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Make the limits more relaxed on desktop, where multi-GB traces are likely.
|
|
// Multi-GB traces can take bursts of CPU time to write to disk at the end of
|
|
// the trace.
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
constexpr uint32_t kWatchdogDefaultCpuLimit = 75;
|
|
constexpr uint32_t kWatchdogDefaultCpuWindow = 5 * 60 * 1000; // 5 minutes.
|
|
#else
|
|
constexpr uint32_t kWatchdogDefaultCpuLimit = 90;
|
|
constexpr uint32_t kWatchdogDefaultCpuWindow = 10 * 60 * 1000; // 10 minutes.
|
|
#endif
|
|
|
|
// The default memory margin we give to our processes. This is used as a
|
|
// constant to put on top of the trace buffers.
|
|
constexpr uint64_t kWatchdogDefaultMemorySlack = 32 * 1024 * 1024; // 32 MiB.
|
|
constexpr uint32_t kWatchdogDefaultMemoryWindow = 30 * 1000; // 30 seconds.
|
|
|
|
inline void RunTaskWithWatchdogGuard(const std::function<void()>& task) {
|
|
// Maximum time a single task can take in a TaskRunner before the
|
|
  // watchdog aborts the program.
|
|
constexpr int64_t kWatchdogMillis = 30000; // 30s
|
|
|
|
Watchdog::Timer handle =
|
|
base::Watchdog::GetInstance()->CreateFatalTimer(kWatchdogMillis);
|
|
task();
|
|
|
|
// Suppress unused variable warnings in the client library amalgamated build.
|
|
(void)kWatchdogDefaultCpuLimit;
|
|
(void)kWatchdogDefaultCpuWindow;
|
|
(void)kWatchdogDefaultMemorySlack;
|
|
(void)kWatchdogDefaultMemoryWindow;
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
|
|
|
|
#include <fcntl.h>
|
|
#include <inttypes.h>
|
|
#include <signal.h>
|
|
#include <stdint.h>
|
|
|
|
#include <fstream>
|
|
#include <thread>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace {
|
|
|
|
constexpr uint32_t kDefaultPollingInterval = 30 * 1000;
|
|
|
|
bool IsMultipleOf(uint32_t number, uint32_t divisor) {
|
|
return number >= divisor && number % divisor == 0;
|
|
}
|
|
|
|
double MeanForArray(const uint64_t array[], size_t size) {
|
|
uint64_t total = 0;
|
|
for (size_t i = 0; i < size; i++) {
|
|
total += array[i];
|
|
}
|
|
return static_cast<double>(total / size);
|
|
|
|
}
|
|
|
|
} // namespace
|
|
|
|
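// Parses the contents of a /proc/<pid>/stat file read from |fd|, extracting
// the utime, stime and rss fields (see "man 5 proc") into |out|. Returns
// false if the file cannot be read or does not match the expected format.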
bool ReadProcStat(int fd, ProcStat* out) {
|
|
char c[512];
|
|
size_t c_pos = 0;
|
|
while (c_pos < sizeof(c) - 1) {
|
|
ssize_t rd = PERFETTO_EINTR(read(fd, c + c_pos, sizeof(c) - c_pos));
|
|
if (rd < 0) {
|
|
PERFETTO_ELOG("Failed to read stat file to enforce resource limits.");
|
|
return false;
|
|
}
|
|
if (rd == 0)
|
|
break;
|
|
c_pos += static_cast<size_t>(rd);
|
|
}
|
|
PERFETTO_CHECK(c_pos < sizeof(c));
|
|
c[c_pos] = '\0';
|
|
|
|
if (sscanf(c,
|
|
"%*d %*s %*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %lu"
|
|
"%lu %*d %*d %*d %*d %*d %*d %*u %*u %ld",
|
|
&out->utime, &out->stime, &out->rss_pages) != 3) {
|
|
PERFETTO_ELOG("Invalid stat format: %s", c);
|
|
return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
Watchdog::Watchdog(uint32_t polling_interval_ms)
|
|
: polling_interval_ms_(polling_interval_ms) {}
|
|
|
|
Watchdog::~Watchdog() {
|
|
if (!thread_.joinable()) {
|
|
PERFETTO_DCHECK(!enabled_);
|
|
return;
|
|
}
|
|
PERFETTO_DCHECK(enabled_);
|
|
enabled_ = false;
|
|
exit_signal_.notify_one();
|
|
thread_.join();
|
|
}
|
|
|
|
Watchdog* Watchdog::GetInstance() {
|
|
static Watchdog* watchdog = new Watchdog(kDefaultPollingInterval);
|
|
return watchdog;
|
|
}
|
|
|
|
Watchdog::Timer Watchdog::CreateFatalTimer(uint32_t ms) {
|
|
if (!enabled_.load(std::memory_order_relaxed))
|
|
return Watchdog::Timer(0);
|
|
|
|
return Watchdog::Timer(ms);
|
|
}
|
|
|
|
void Watchdog::Start() {
|
|
std::lock_guard<std::mutex> guard(mutex_);
|
|
if (thread_.joinable()) {
|
|
PERFETTO_DCHECK(enabled_);
|
|
} else {
|
|
PERFETTO_DCHECK(!enabled_);
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
// Kick the thread to start running but only on Android or Linux.
|
|
enabled_ = true;
|
|
thread_ = std::thread(&Watchdog::ThreadMain, this);
|
|
#endif
|
|
}
|
|
}
|
|
|
|
void Watchdog::SetMemoryLimit(uint64_t bytes, uint32_t window_ms) {
|
|
// Update the fields under the lock.
|
|
std::lock_guard<std::mutex> guard(mutex_);
|
|
|
|
PERFETTO_CHECK(IsMultipleOf(window_ms, polling_interval_ms_) || bytes == 0);
|
|
|
|
size_t size = bytes == 0 ? 0 : window_ms / polling_interval_ms_ + 1;
|
|
memory_window_bytes_.Reset(size);
|
|
memory_limit_bytes_ = bytes;
|
|
}
|
|
|
|
void Watchdog::SetCpuLimit(uint32_t percentage, uint32_t window_ms) {
|
|
std::lock_guard<std::mutex> guard(mutex_);
|
|
|
|
PERFETTO_CHECK(percentage <= 100);
|
|
PERFETTO_CHECK(IsMultipleOf(window_ms, polling_interval_ms_) ||
|
|
percentage == 0);
|
|
|
|
size_t size = percentage == 0 ? 0 : window_ms / polling_interval_ms_ + 1;
|
|
cpu_window_time_ticks_.Reset(size);
|
|
cpu_limit_percentage_ = percentage;
|
|
}
|
|
|
|
void Watchdog::ThreadMain() {
|
|
base::ScopedFile stat_fd(base::OpenFile("/proc/self/stat", O_RDONLY));
|
|
if (!stat_fd) {
|
|
PERFETTO_ELOG("Failed to open stat file to enforce resource limits.");
|
|
return;
|
|
}
|
|
|
|
std::unique_lock<std::mutex> guard(mutex_);
|
|
for (;;) {
|
|
exit_signal_.wait_for(guard,
|
|
std::chrono::milliseconds(polling_interval_ms_));
|
|
if (!enabled_)
|
|
return;
|
|
|
|
lseek(stat_fd.get(), 0, SEEK_SET);
|
|
|
|
ProcStat stat;
|
|
if (!ReadProcStat(stat_fd.get(), &stat)) {
|
|
return;
|
|
}
|
|
|
|
uint64_t cpu_time = stat.utime + stat.stime;
|
|
uint64_t rss_bytes =
|
|
static_cast<uint64_t>(stat.rss_pages) * base::GetSysPageSize();
|
|
|
|
CheckMemory(rss_bytes);
|
|
CheckCpu(cpu_time);
|
|
}
|
|
}
|
|
|
|
void Watchdog::CheckMemory(uint64_t rss_bytes) {
|
|
if (memory_limit_bytes_ == 0)
|
|
return;
|
|
|
|
// Add the current stat value to the ring buffer and check that the mean
|
|
// remains under our threshold.
|
|
if (memory_window_bytes_.Push(rss_bytes)) {
|
|
if (memory_window_bytes_.Mean() > static_cast<double>(memory_limit_bytes_)) {
|
|
PERFETTO_ELOG(
|
|
"Memory watchdog trigger. Memory window of %f bytes is above the "
|
|
"%" PRIu64 " bytes limit.",
|
|
memory_window_bytes_.Mean(), memory_limit_bytes_);
|
|
kill(getpid(), SIGABRT);
|
|
}
|
|
}
|
|
}
|
|
|
|
void Watchdog::CheckCpu(uint64_t cpu_time) {
|
|
if (cpu_limit_percentage_ == 0)
|
|
return;
|
|
|
|
// Add the cpu time to the ring buffer.
|
|
if (cpu_window_time_ticks_.Push(cpu_time)) {
|
|
// Compute the percentage over the whole window and check that it remains
|
|
// under the threshold.
|
|
uint64_t difference_ticks = cpu_window_time_ticks_.NewestWhenFull() -
|
|
cpu_window_time_ticks_.OldestWhenFull();
|
|
double window_interval_ticks =
|
|
(static_cast<double>(WindowTimeForRingBuffer(cpu_window_time_ticks_)) /
|
|
1000.0) *
|
|
static_cast<double>(sysconf(_SC_CLK_TCK));
|
|
double percentage = static_cast<double>(difference_ticks) /
|
|
static_cast<double>(window_interval_ticks) * 100;
|
|
if (percentage > cpu_limit_percentage_) {
|
|
PERFETTO_ELOG("CPU watchdog trigger. %f%% CPU use is above the %" PRIu32
|
|
"%% CPU limit.",
|
|
percentage, cpu_limit_percentage_);
|
|
kill(getpid(), SIGABRT);
|
|
}
|
|
}
|
|
}
|
|
|
|
uint32_t Watchdog::WindowTimeForRingBuffer(const WindowedInterval& window) {
|
|
return static_cast<uint32_t>(window.size() - 1) * polling_interval_ms_;
|
|
}
|
|
|
|
bool Watchdog::WindowedInterval::Push(uint64_t sample) {
|
|
// Add the sample to the current position in the ring buffer.
|
|
buffer_[position_] = sample;
|
|
|
|
// Advance the position to the next slot, wrapping around circularly.
|
|
position_ = (position_ + 1) % size_;
|
|
|
|
// Set the filled flag the first time we wrap.
|
|
filled_ = filled_ || position_ == 0;
|
|
return filled_;
|
|
}
|
|
|
|
double Watchdog::WindowedInterval::Mean() const {
|
|
return MeanForArray(buffer_.get(), size_);
|
|
}
|
|
|
|
void Watchdog::WindowedInterval::Clear() {
|
|
position_ = 0;
|
|
buffer_.reset(new uint64_t[size_]());
|
|
}
|
|
|
|
void Watchdog::WindowedInterval::Reset(size_t new_size) {
|
|
position_ = 0;
|
|
size_ = new_size;
|
|
buffer_.reset(new_size == 0 ? nullptr : new uint64_t[new_size]());
|
|
}
|
|
|
|
Watchdog::Timer::Timer(uint32_t ms) {
|
|
if (!ms)
|
|
return; // No-op timer created when the watchdog is disabled.
|
|
|
|
struct sigevent sev = {};
|
|
sev.sigev_notify = SIGEV_THREAD_ID;
|
|
sev._sigev_un._tid = base::GetThreadId();
|
|
sev.sigev_signo = SIGABRT;
|
|
PERFETTO_CHECK(timer_create(CLOCK_MONOTONIC, &sev, &timerid_) != -1);
|
|
struct itimerspec its = {};
|
|
its.it_value.tv_sec = ms / 1000;
|
|
its.it_value.tv_nsec = 1000000L * (ms % 1000);
|
|
PERFETTO_CHECK(timer_settime(timerid_, 0, &its, nullptr) != -1);
|
|
}
|
|
|
|
Watchdog::Timer::~Timer() {
|
|
if (timerid_ != nullptr) {
|
|
timer_delete(timerid_);
|
|
}
|
|
}
|
|
|
|
Watchdog::Timer::Timer(Timer&& other) noexcept {
|
|
timerid_ = other.timerid_;
|
|
other.timerid_ = nullptr;
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
|
|
// gen_amalgamated begin source: src/base/event_fd.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/event_fd.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_EVENT_FD_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_EVENT_FD_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
#define PERFETTO_USE_EVENTFD() 1
|
|
#else
|
|
#define PERFETTO_USE_EVENTFD() 0
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A waitable event that can be used with poll/select.
|
|
// This is really a wrapper around eventfd() with a pipe-based fallback for
// platforms where eventfd is not supported.
|
|
class EventFd {
|
|
public:
|
|
EventFd();
|
|
~EventFd();
|
|
EventFd(EventFd&&) noexcept = default;
|
|
EventFd& operator=(EventFd&&) = default;
|
|
|
|
// The non-blocking file descriptor that can be polled to wait for the event.
|
|
int fd() const { return fd_.get(); }
|
|
|
|
// Can be called from any thread.
|
|
void Notify();
|
|
|
|
// Can be called from any thread. If multiple Notify() calls have been queued,
// a single Clear() drains all of them (up to 16 per call when using the
// pipe-based fallback).
|
|
void Clear();
|
|
|
|
private:
|
|
// The eventfd, when eventfd is supported, otherwise this is the read end of
|
|
// the pipe for fallback mode.
|
|
ScopedFile fd_;
|
|
|
|
#if !PERFETTO_USE_EVENTFD()
|
|
// The write end of the wakeup pipe.
|
|
ScopedFile write_fd_;
|
|
#endif
|
|
};
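// Minimal usage sketch (illustrative only; |task_runner| and OnWake() are
// hypothetical):
//
//   EventFd evt;
//   task_runner->AddFileDescriptorWatch(evt.fd(), [&evt] {
//     evt.Clear();  // Drain queued notifications.
//     OnWake();
//   });
//   // From any other thread:
//   evt.Notify();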
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_EVENT_FD_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
#include <stdint.h>
|
|
#include <unistd.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/event_fd.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
#if PERFETTO_USE_EVENTFD()
|
|
#include <sys/eventfd.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
EventFd::EventFd() {
|
|
#if PERFETTO_USE_EVENTFD()
|
|
fd_.reset(eventfd(/* start value */ 0, EFD_CLOEXEC | EFD_NONBLOCK));
|
|
PERFETTO_CHECK(fd_);
|
|
#else
|
|
// Make the pipe non-blocking so that we never block the waking thread (either
|
|
// the main thread or another one) when scheduling a wake-up.
|
|
Pipe pipe = Pipe::Create(Pipe::kBothNonBlock);
|
|
fd_ = std::move(pipe.rd);
|
|
write_fd_ = std::move(pipe.wr);
|
|
#endif // !PERFETTO_USE_EVENTFD()
|
|
}
|
|
|
|
EventFd::~EventFd() = default;
|
|
|
|
void EventFd::Notify() {
|
|
const uint64_t value = 1;
|
|
|
|
#if PERFETTO_USE_EVENTFD()
|
|
ssize_t ret = write(fd_.get(), &value, sizeof(value));
|
|
#else
|
|
ssize_t ret = write(write_fd_.get(), &value, sizeof(uint8_t));
|
|
#endif
|
|
|
|
if (ret <= 0 && errno != EAGAIN) {
|
|
PERFETTO_DFATAL("write()");
|
|
}
|
|
}
|
|
|
|
void EventFd::Clear() {
|
|
#if PERFETTO_USE_EVENTFD()
|
|
uint64_t value;
|
|
ssize_t ret = read(fd_.get(), &value, sizeof(value));
|
|
#else
|
|
// Drain the byte(s) written to the wake-up pipe. We can potentially read
|
|
// more than one byte if several wake-ups have been scheduled.
|
|
char buffer[16];
|
|
ssize_t ret = read(fd_.get(), &buffer[0], sizeof(buffer));
|
|
#endif
|
|
if (ret <= 0 && errno != EAGAIN)
|
|
PERFETTO_DPLOG("read()");
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// gen_amalgamated begin source: src/base/pipe.cc
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
|
|
|
|
#include <sys/types.h>
|
|
#include <unistd.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
Pipe::Pipe() = default;
|
|
Pipe::Pipe(Pipe&&) noexcept = default;
|
|
Pipe& Pipe::operator=(Pipe&&) = default;
|
|
|
|
Pipe Pipe::Create(Flags flags) {
|
|
int fds[2];
|
|
PERFETTO_CHECK(pipe(fds) == 0);
|
|
Pipe p;
|
|
p.rd.reset(fds[0]);
|
|
p.wr.reset(fds[1]);
|
|
|
|
PERFETTO_CHECK(fcntl(*p.rd, F_SETFD, FD_CLOEXEC) == 0);
|
|
PERFETTO_CHECK(fcntl(*p.wr, F_SETFD, FD_CLOEXEC) == 0);
|
|
|
|
if (flags == kBothNonBlock || flags == kRdNonBlock) {
|
|
int cur_flags = fcntl(*p.rd, F_GETFL, 0);
|
|
PERFETTO_CHECK(cur_flags >= 0);
|
|
PERFETTO_CHECK(fcntl(*p.rd, F_SETFL, cur_flags | O_NONBLOCK) == 0);
|
|
}
|
|
|
|
if (flags == kBothNonBlock || flags == kWrNonBlock) {
|
|
int cur_flags = fcntl(*p.wr, F_GETFL, 0);
|
|
PERFETTO_CHECK(cur_flags >= 0);
|
|
PERFETTO_CHECK(fcntl(*p.wr, F_SETFL, cur_flags | O_NONBLOCK) == 0);
|
|
}
|
|
return p;
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// gen_amalgamated begin source: src/base/temp_file.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/temp_file.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_TEMP_FILE_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_TEMP_FILE_H_
|
|
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
std::string GetSysTempDir();
|
|
|
|
class TempFile {
|
|
public:
|
|
static TempFile CreateUnlinked();
|
|
static TempFile Create();
|
|
|
|
TempFile(TempFile&&) noexcept;
|
|
TempFile& operator=(TempFile&&);
|
|
~TempFile();
|
|
|
|
const std::string& path() const { return path_; }
|
|
int fd() const { return *fd_; }
|
|
int operator*() const { return *fd_; }
|
|
|
|
// Unlinks the file from the filesystem but keeps the fd() open.
|
|
// It is safe to call this multiple times.
|
|
void Unlink();
|
|
|
|
// Releases the underlying file descriptor. Will unlink the file from the
|
|
// filesystem if it was created via CreateUnlinked().
|
|
ScopedFile ReleaseFD();
|
|
|
|
private:
|
|
TempFile();
|
|
TempFile(const TempFile&) = delete;
|
|
TempFile& operator=(const TempFile&) = delete;
|
|
|
|
ScopedFile fd_;
|
|
std::string path_;
|
|
};
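// Minimal usage sketch (illustrative only):
//
//   base::TempFile tmp = base::TempFile::CreateUnlinked();
//   ssize_t written = write(tmp.fd(), "data", 4);  // Plain POSIX write().
//   PERFETTO_CHECK(written == 4);
//   base::ScopedFile fd = tmp.ReleaseFD();  // Keep the fd; the path is gone.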
|
|
|
|
class TempDir {
|
|
public:
|
|
static TempDir Create();
|
|
|
|
TempDir(TempDir&&) noexcept;
|
|
TempDir& operator=(TempDir&&);
|
|
~TempDir();
|
|
|
|
const std::string& path() const { return path_; }
|
|
|
|
private:
|
|
TempDir();
|
|
TempDir(const TempDir&) = delete;
|
|
TempDir& operator=(const TempDir&) = delete;
|
|
|
|
std::string path_;
|
|
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_TEMP_FILE_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/temp_file.h"
|
|
|
|
#include <stdlib.h>
|
|
#include <unistd.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
std::string GetSysTempDir() {
|
|
const char* tmpdir = getenv("TMPDIR");
|
|
if (tmpdir)
|
|
return base::StripSuffix(tmpdir, "/");
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
return "/data/local/tmp";
|
|
#else
|
|
return "/tmp";
|
|
#endif
|
|
}
|
|
|
|
// static
|
|
TempFile TempFile::Create() {
|
|
TempFile temp_file;
|
|
temp_file.path_ = GetSysTempDir() + "/perfetto-XXXXXXXX";
|
|
temp_file.fd_.reset(mkstemp(&temp_file.path_[0]));
|
|
if (PERFETTO_UNLIKELY(!temp_file.fd_)) {
|
|
PERFETTO_FATAL("Could not create temp file %s", temp_file.path_.c_str());
|
|
}
|
|
return temp_file;
|
|
}
|
|
|
|
// static
|
|
TempFile TempFile::CreateUnlinked() {
|
|
TempFile temp_file = TempFile::Create();
|
|
temp_file.Unlink();
|
|
return temp_file;
|
|
}
|
|
|
|
TempFile::TempFile() = default;
|
|
|
|
TempFile::~TempFile() {
|
|
Unlink();
|
|
}
|
|
|
|
ScopedFile TempFile::ReleaseFD() {
|
|
Unlink();
|
|
return std::move(fd_);
|
|
}
|
|
|
|
void TempFile::Unlink() {
|
|
if (path_.empty())
|
|
return;
|
|
PERFETTO_CHECK(unlink(path_.c_str()) == 0);
|
|
path_.clear();
|
|
}
|
|
|
|
TempFile::TempFile(TempFile&&) noexcept = default;
|
|
TempFile& TempFile::operator=(TempFile&&) = default;
|
|
|
|
// static
|
|
TempDir TempDir::Create() {
|
|
TempDir temp_dir;
|
|
temp_dir.path_ = GetSysTempDir() + "/perfetto-XXXXXXXX";
|
|
PERFETTO_CHECK(mkdtemp(&temp_dir.path_[0]));
|
|
return temp_dir;
|
|
}
|
|
|
|
TempDir::TempDir() = default;
|
|
|
|
TempDir::~TempDir() {
|
|
PERFETTO_CHECK(rmdir(path_.c_str()) == 0);
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// gen_amalgamated begin source: src/base/thread_task_runner.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/thread_task_runner.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/unix_task_runner.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_UNIX_TASK_RUNNER_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_UNIX_TASK_RUNNER_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/event_fd.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
|
|
#include <poll.h>
|
|
#include <chrono>
|
|
#include <deque>
|
|
#include <map>
|
|
#include <mutex>
|
|
#include <vector>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Runs a task runner on the current thread.
|
|
//
|
|
// Implementation note: we currently assume (and enforce in debug builds) that
|
|
// Run() is called from the thread that constructed the UnixTaskRunner. This is
|
|
// not strictly necessary, and we could instead track the thread that invokes
|
|
// Run(). However, a related property that *might* be important to enforce is
|
|
// that the destructor runs on the task-running thread. Otherwise, if there are
|
|
// still-pending tasks at the time of destruction, we would destroy those
|
|
// outside of the task thread (which might be unexpected to the caller). On the
|
|
// other hand, the std::function task interface discourages use of any
|
|
// resource-owning tasks (as the callable needs to be copyable), so this might
|
|
// not be important in practice.
|
|
//
|
|
// TODO(rsavitski): consider adding a thread-check in the destructor, after
|
|
// auditing existing usages.
|
|
class UnixTaskRunner : public TaskRunner {
|
|
public:
|
|
UnixTaskRunner();
|
|
~UnixTaskRunner() override;
|
|
|
|
// Start executing tasks. Doesn't return until Quit() is called. Run() may be
|
|
// called multiple times on the same task runner.
|
|
void Run();
|
|
void Quit();
|
|
|
|
// Checks whether there are any pending immediate tasks to run. Note that
|
|
// delayed tasks don't count even if they are due to run.
|
|
bool IsIdleForTesting();
|
|
|
|
// TaskRunner implementation:
|
|
void PostTask(std::function<void()>) override;
|
|
void PostDelayedTask(std::function<void()>, uint32_t delay_ms) override;
|
|
void AddFileDescriptorWatch(int fd, std::function<void()>) override;
|
|
void RemoveFileDescriptorWatch(int fd) override;
|
|
bool RunsTasksOnCurrentThread() const override;
|
|
|
|
// Returns true if the task runner is quitting, or has quit and hasn't been
|
|
// restarted since. Exposed primarily for ThreadTaskRunner, not necessary for
|
|
// normal use of this class.
|
|
bool QuitCalled();
|
|
|
|
private:
|
|
void WakeUp();
|
|
|
|
void UpdateWatchTasksLocked();
|
|
|
|
int GetDelayMsToNextTaskLocked() const;
|
|
void RunImmediateAndDelayedTask();
|
|
void PostFileDescriptorWatches();
|
|
void RunFileDescriptorWatch(int fd);
|
|
|
|
ThreadChecker thread_checker_;
|
|
PlatformThreadId created_thread_id_ = GetThreadId();
|
|
|
|
// On Linux, an eventfd(2) is used to wake up the task runner when a new task
// is posted. Otherwise, the read end of a pipe is used for the same purpose.
|
|
EventFd event_;
|
|
|
|
std::vector<struct pollfd> poll_fds_;
|
|
|
|
// --- Begin lock-protected members ---
|
|
|
|
std::mutex lock_;
|
|
|
|
std::deque<std::function<void()>> immediate_tasks_;
|
|
std::multimap<TimeMillis, std::function<void()>> delayed_tasks_;
|
|
bool quit_ = false;
|
|
|
|
struct WatchTask {
|
|
std::function<void()> callback;
|
|
size_t poll_fd_index; // Index into |poll_fds_|.
|
|
};
|
|
|
|
std::map<int, WatchTask> watch_tasks_;
|
|
bool watch_tasks_changed_ = false;
|
|
|
|
// --- End lock-protected members ---
|
|
};
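// Minimal usage sketch (illustrative only):
//
//   UnixTaskRunner task_runner;
//   task_runner.PostTask([] { /* runs on the Run() thread */ });
//   task_runner.PostDelayedTask([&task_runner] { task_runner.Quit(); }, 1000);
//   task_runner.Run();  // Blocks until Quit() is called.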
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_UNIX_TASK_RUNNER_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_TASK_RUNNER_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_THREAD_TASK_RUNNER_H_
|
|
|
|
#include <functional>
|
|
#include <thread>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_task_runner.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A UnixTaskRunner backed by a dedicated task thread. Shuts down the runner and
|
|
// joins the thread upon destruction. Can be moved to transfer ownership.
|
|
//
|
|
// Guarantees that:
|
|
// * the UnixTaskRunner will be constructed and destructed on the task thread.
|
|
// * the task thread will live for the lifetime of the UnixTaskRunner.
|
|
//
|
|
class ThreadTaskRunner {
|
|
public:
|
|
static ThreadTaskRunner CreateAndStart(const std::string& name = "") {
|
|
return ThreadTaskRunner(name);
|
|
}
|
|
|
|
ThreadTaskRunner(const ThreadTaskRunner&) = delete;
|
|
ThreadTaskRunner& operator=(const ThreadTaskRunner&) = delete;
|
|
|
|
ThreadTaskRunner(ThreadTaskRunner&&) noexcept;
|
|
ThreadTaskRunner& operator=(ThreadTaskRunner&&);
|
|
~ThreadTaskRunner();
|
|
|
|
// Executes the given function on the task runner thread and blocks the caller
|
|
// thread until the function has run.
|
|
void PostTaskAndWaitForTesting(std::function<void()>);
|
|
|
|
// Can be called from another thread to get the CPU time of the thread the
|
|
// task-runner is executing on.
|
|
uint64_t GetThreadCPUTimeNsForTesting();
|
|
|
|
// Returns a pointer to the UnixTaskRunner, which is valid for the lifetime of
|
|
// this ThreadTaskRunner object (unless this object is moved-from, in which
|
|
// case the pointer remains valid for the lifetime of the new owning
|
|
// ThreadTaskRunner).
|
|
//
|
|
// Warning: do not call Quit() on the returned runner pointer; termination
// should be handled exclusively by this class's destructor.
|
|
UnixTaskRunner* get() const { return task_runner_; }
|
|
|
|
private:
|
|
explicit ThreadTaskRunner(const std::string& name);
|
|
void RunTaskThread(std::function<void(UnixTaskRunner*)> initializer);
|
|
|
|
std::thread thread_;
|
|
std::string name_;
|
|
UnixTaskRunner* task_runner_ = nullptr;
|
|
};
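// Minimal usage sketch (illustrative only):
//
//   auto runner = ThreadTaskRunner::CreateAndStart("my_thread");
//   runner.get()->PostTask([] { /* runs on the dedicated thread */ });
//   // The destructor quits the runner and joins the thread.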
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_THREAD_TASK_RUNNER_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/thread_utils.h
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_UTILS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_THREAD_UTILS_H_
|
|
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#include <pthread.h>
|
|
#include <string.h>
|
|
#include <algorithm>
|
|
#endif
|
|
|
|
// Internal implementation utilities that are less widely useful/supported than
// base/thread_utils.h.
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
// Sets the "comm" of the calling thread to the first 15 chars of the given
|
|
// string.
|
|
inline bool MaybeSetThreadName(const std::string& name) {
|
|
char buf[16] = {};
|
|
size_t sz = std::min(name.size(), static_cast<size_t>(15));
|
|
strncpy(buf, name.c_str(), sz);
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
return pthread_setname_np(buf) == 0;
|
|
#else
|
|
return pthread_setname_np(pthread_self(), buf) == 0;
|
|
#endif
|
|
}
|
|
#else
|
|
inline bool MaybeSetThreadName(const std::string&) { return false; }
|
|
#endif
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_THREAD_UTILS_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_task_runner.h"
|
|
|
|
#include <condition_variable>
|
|
#include <functional>
|
|
#include <mutex>
|
|
#include <thread>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_task_runner.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
#include <sys/prctl.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
ThreadTaskRunner::ThreadTaskRunner(ThreadTaskRunner&& other) noexcept
|
|
: thread_(std::move(other.thread_)), task_runner_(other.task_runner_) {
|
|
other.task_runner_ = nullptr;
|
|
}
|
|
|
|
ThreadTaskRunner& ThreadTaskRunner::operator=(ThreadTaskRunner&& other) {
|
|
this->~ThreadTaskRunner();
|
|
new (this) ThreadTaskRunner(std::move(other));
|
|
return *this;
|
|
}
|
|
|
|
ThreadTaskRunner::~ThreadTaskRunner() {
|
|
if (task_runner_) {
|
|
PERFETTO_CHECK(!task_runner_->QuitCalled());
|
|
task_runner_->Quit();
|
|
|
|
PERFETTO_DCHECK(thread_.joinable());
|
|
}
|
|
if (thread_.joinable())
|
|
thread_.join();
|
|
}
|
|
|
|
ThreadTaskRunner::ThreadTaskRunner(const std::string& name) : name_(name) {
|
|
std::mutex init_lock;
|
|
std::condition_variable init_cv;
|
|
|
|
std::function<void(UnixTaskRunner*)> initializer =
|
|
[this, &init_lock, &init_cv](UnixTaskRunner* task_runner) {
|
|
std::lock_guard<std::mutex> lock(init_lock);
|
|
task_runner_ = task_runner;
|
|
// Notify while still holding the lock, as init_cv ceases to exist as
|
|
// soon as the main thread observes a non-null task_runner_, and it can
|
|
// wake up spuriously (i.e. before the notify if we had unlocked before
|
|
// notifying).
|
|
init_cv.notify_one();
|
|
};
|
|
|
|
thread_ = std::thread(&ThreadTaskRunner::RunTaskThread, this,
|
|
std::move(initializer));
|
|
|
|
std::unique_lock<std::mutex> lock(init_lock);
|
|
init_cv.wait(lock, [this] { return !!task_runner_; });
|
|
}
|
|
|
|
void ThreadTaskRunner::RunTaskThread(
|
|
std::function<void(UnixTaskRunner*)> initializer) {
|
|
if (!name_.empty()) {
|
|
base::MaybeSetThreadName(name_);
|
|
}
|
|
|
|
UnixTaskRunner task_runner;
|
|
task_runner.PostTask(std::bind(std::move(initializer), &task_runner));
|
|
task_runner.Run();
|
|
}
|
|
|
|
void ThreadTaskRunner::PostTaskAndWaitForTesting(std::function<void()> fn) {
|
|
std::mutex mutex;
|
|
std::condition_variable cv;
|
|
|
|
std::unique_lock<std::mutex> lock(mutex);
|
|
bool done = false;
|
|
task_runner_->PostTask([&mutex, &cv, &done, &fn] {
|
|
fn();
|
|
|
|
std::lock_guard<std::mutex> inner_lock(mutex);
|
|
done = true;
|
|
cv.notify_one();
|
|
});
|
|
cv.wait(lock, [&done] { return done; });
|
|
}
|
|
|
|
uint64_t ThreadTaskRunner::GetThreadCPUTimeNsForTesting() {
|
|
uint64_t thread_time_ns = 0;
|
|
PostTaskAndWaitForTesting([&thread_time_ns] {
|
|
thread_time_ns = static_cast<uint64_t>(base::GetThreadCPUTimeNs().count());
|
|
});
|
|
return thread_time_ns;
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// gen_amalgamated begin source: src/base/unix_task_runner.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_task_runner.h"
|
|
|
|
#include <errno.h>
|
|
#include <stdlib.h>
|
|
#include <unistd.h>
|
|
|
|
#include <limits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
UnixTaskRunner::UnixTaskRunner() {
|
|
AddFileDescriptorWatch(event_.fd(), [] {
|
|
// Not reached -- see PostFileDescriptorWatches().
|
|
PERFETTO_DFATAL("Should be unreachable.");
|
|
});
|
|
}
|
|
|
|
UnixTaskRunner::~UnixTaskRunner() = default;
|
|
|
|
void UnixTaskRunner::WakeUp() {
|
|
event_.Notify();
|
|
}
|
|
|
|
void UnixTaskRunner::Run() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
created_thread_id_ = GetThreadId();
|
|
quit_ = false;
|
|
for (;;) {
|
|
int poll_timeout_ms;
|
|
{
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
if (quit_)
|
|
return;
|
|
poll_timeout_ms = GetDelayMsToNextTaskLocked();
|
|
UpdateWatchTasksLocked();
|
|
}
|
|
int ret = PERFETTO_EINTR(poll(
|
|
&poll_fds_[0], static_cast<nfds_t>(poll_fds_.size()), poll_timeout_ms));
|
|
PERFETTO_CHECK(ret >= 0);
|
|
|
|
// To avoid starvation we always interleave all types of tasks -- immediate,
|
|
// delayed and file descriptor watches.
|
|
PostFileDescriptorWatches();
|
|
RunImmediateAndDelayedTask();
|
|
}
|
|
}
|
|
|
|
void UnixTaskRunner::Quit() {
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
quit_ = true;
|
|
WakeUp();
|
|
}
|
|
|
|
bool UnixTaskRunner::QuitCalled() {
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
return quit_;
|
|
}
|
|
|
|
bool UnixTaskRunner::IsIdleForTesting() {
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
return immediate_tasks_.empty();
|
|
}
|
|
|
|
void UnixTaskRunner::UpdateWatchTasksLocked() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!watch_tasks_changed_)
|
|
return;
|
|
watch_tasks_changed_ = false;
|
|
poll_fds_.clear();
|
|
for (auto& it : watch_tasks_) {
|
|
it.second.poll_fd_index = poll_fds_.size();
|
|
poll_fds_.push_back({it.first, POLLIN | POLLHUP, 0});
|
|
}
|
|
}
|
|
|
|
void UnixTaskRunner::RunImmediateAndDelayedTask() {
|
|
// If locking overhead becomes an issue, add a separate work queue.
|
|
std::function<void()> immediate_task;
|
|
std::function<void()> delayed_task;
|
|
TimeMillis now = GetWallTimeMs();
|
|
{
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
if (!immediate_tasks_.empty()) {
|
|
immediate_task = std::move(immediate_tasks_.front());
|
|
immediate_tasks_.pop_front();
|
|
}
|
|
if (!delayed_tasks_.empty()) {
|
|
auto it = delayed_tasks_.begin();
|
|
if (now >= it->first) {
|
|
delayed_task = std::move(it->second);
|
|
delayed_tasks_.erase(it);
|
|
}
|
|
}
|
|
}
|
|
|
|
errno = 0;
|
|
if (immediate_task)
|
|
RunTaskWithWatchdogGuard(immediate_task);
|
|
errno = 0;
|
|
if (delayed_task)
|
|
RunTaskWithWatchdogGuard(delayed_task);
|
|
}
|
|
|
|
void UnixTaskRunner::PostFileDescriptorWatches() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
for (size_t i = 0; i < poll_fds_.size(); i++) {
|
|
if (!(poll_fds_[i].revents & (POLLIN | POLLHUP)))
|
|
continue;
|
|
poll_fds_[i].revents = 0;
|
|
|
|
// The wake-up event is handled inline to avoid an infinite recursion of
|
|
// posted tasks.
|
|
if (poll_fds_[i].fd == event_.fd()) {
|
|
event_.Clear();
|
|
continue;
|
|
}
|
|
|
|
// Binding to |this| is safe since we are the only object executing the
|
|
// task.
|
|
PostTask(std::bind(&UnixTaskRunner::RunFileDescriptorWatch, this,
|
|
poll_fds_[i].fd));
|
|
|
|
// Make the fd negative while a posted task is pending. This makes poll(2)
|
|
// ignore the fd.
|
|
PERFETTO_DCHECK(poll_fds_[i].fd >= 0);
|
|
poll_fds_[i].fd = -poll_fds_[i].fd;
|
|
}
|
|
}
|
|
|
|
void UnixTaskRunner::RunFileDescriptorWatch(int fd) {
|
|
std::function<void()> task;
|
|
{
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
auto it = watch_tasks_.find(fd);
|
|
if (it == watch_tasks_.end())
|
|
return;
|
|
// Make poll(2) pay attention to the fd again. Since another thread may have
|
|
// updated this watch we need to refresh the set first.
|
|
UpdateWatchTasksLocked();
|
|
size_t fd_index = it->second.poll_fd_index;
|
|
PERFETTO_DCHECK(fd_index < poll_fds_.size());
|
|
PERFETTO_DCHECK(::abs(poll_fds_[fd_index].fd) == fd);
|
|
poll_fds_[fd_index].fd = fd;
|
|
task = it->second.callback;
|
|
}
|
|
errno = 0;
|
|
RunTaskWithWatchdogGuard(task);
|
|
}
|
|
|
|
int UnixTaskRunner::GetDelayMsToNextTaskLocked() const {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!immediate_tasks_.empty())
|
|
return 0;
|
|
if (!delayed_tasks_.empty()) {
|
|
TimeMillis diff = delayed_tasks_.begin()->first - GetWallTimeMs();
|
|
return std::max(0, static_cast<int>(diff.count()));
|
|
}
|
|
return -1;
|
|
}
|
|
|
|
void UnixTaskRunner::PostTask(std::function<void()> task) {
|
|
bool was_empty;
|
|
{
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
was_empty = immediate_tasks_.empty();
|
|
immediate_tasks_.push_back(std::move(task));
|
|
}
|
|
if (was_empty)
|
|
WakeUp();
|
|
}
|
|
|
|
void UnixTaskRunner::PostDelayedTask(std::function<void()> task,
|
|
uint32_t delay_ms) {
|
|
TimeMillis runtime = GetWallTimeMs() + TimeMillis(delay_ms);
|
|
{
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
delayed_tasks_.insert(std::make_pair(runtime, std::move(task)));
|
|
}
|
|
WakeUp();
|
|
}
|
|
|
|
void UnixTaskRunner::AddFileDescriptorWatch(int fd,
|
|
std::function<void()> task) {
|
|
PERFETTO_DCHECK(fd >= 0);
|
|
{
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
PERFETTO_DCHECK(!watch_tasks_.count(fd));
|
|
watch_tasks_[fd] = {std::move(task), SIZE_MAX};
|
|
watch_tasks_changed_ = true;
|
|
}
|
|
WakeUp();
|
|
}
|
|
|
|
void UnixTaskRunner::RemoveFileDescriptorWatch(int fd) {
|
|
PERFETTO_DCHECK(fd >= 0);
|
|
{
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
PERFETTO_DCHECK(watch_tasks_.count(fd));
|
|
watch_tasks_.erase(fd);
|
|
watch_tasks_changed_ = true;
|
|
}
|
|
// No need to schedule a wake-up for this.
|
|
}
|
|
|
|
bool UnixTaskRunner::RunsTasksOnCurrentThread() const {
|
|
return GetThreadId() == created_thread_id_;
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// gen_amalgamated begin source: src/protozero/field.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/field.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
|
|
// The memcpy() for fixed32/64 below needs to be adjusted if we want to
|
|
// support big endian CPUs. There doesn't seem to be a compelling need today.
|
|
#error Unimplemented for big endian archs.
|
|
#endif
|
|
|
|
namespace protozero {
|
|
|
|
template <typename Container>
|
|
void Field::SerializeAndAppendToInternal(Container* dst) const {
|
|
namespace pu = proto_utils;
|
|
size_t initial_size = dst->size();
|
|
dst->resize(initial_size + pu::kMaxSimpleFieldEncodedSize + size_);
|
|
uint8_t* start = reinterpret_cast<uint8_t*>(&(*dst)[initial_size]);
|
|
uint8_t* wptr = start;
|
|
switch (type_) {
|
|
case static_cast<int>(pu::ProtoWireType::kVarInt): {
|
|
wptr = pu::WriteVarInt(pu::MakeTagVarInt(id_), wptr);
|
|
wptr = pu::WriteVarInt(int_value_, wptr);
|
|
break;
|
|
}
|
|
case static_cast<int>(pu::ProtoWireType::kFixed32): {
|
|
wptr = pu::WriteVarInt(pu::MakeTagFixed<uint32_t>(id_), wptr);
|
|
uint32_t value32 = static_cast<uint32_t>(int_value_);
|
|
memcpy(wptr, &value32, sizeof(value32));
|
|
wptr += sizeof(uint32_t);
|
|
break;
|
|
}
|
|
case static_cast<int>(pu::ProtoWireType::kFixed64): {
|
|
wptr = pu::WriteVarInt(pu::MakeTagFixed<uint64_t>(id_), wptr);
|
|
memcpy(wptr, &int_value_, sizeof(int_value_));
|
|
wptr += sizeof(uint64_t);
|
|
break;
|
|
}
|
|
case static_cast<int>(pu::ProtoWireType::kLengthDelimited): {
|
|
ConstBytes payload = as_bytes();
|
|
wptr = pu::WriteVarInt(pu::MakeTagLengthDelimited(id_), wptr);
|
|
wptr = pu::WriteVarInt(payload.size, wptr);
|
|
memcpy(wptr, payload.data, payload.size);
|
|
wptr += payload.size;
|
|
break;
|
|
}
|
|
default:
|
|
PERFETTO_FATAL("Unknown field type %u", type_);
|
|
}
|
|
size_t written_size = static_cast<size_t>(wptr - start);
|
|
PERFETTO_DCHECK(written_size > 0 && written_size < pu::kMaxMessageLength);
|
|
PERFETTO_DCHECK(initial_size + written_size <= dst->size());
|
|
dst->resize(initial_size + written_size);
|
|
}
|
|
|
|
void Field::SerializeAndAppendTo(std::string* dst) const {
|
|
SerializeAndAppendToInternal(dst);
|
|
}
|
|
|
|
void Field::SerializeAndAppendTo(std::vector<uint8_t>* dst) const {
|
|
SerializeAndAppendToInternal(dst);
|
|
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/message.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
|
|
#include <atomic>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_arena.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
|
|
#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
|
|
// The memcpy() for float and double below needs to be adjusted if we want to
|
|
// support big endian CPUs. There doesn't seem to be a compelling need today.
|
|
#error Unimplemented for big endian archs.
|
|
#endif
|
|
|
|
namespace protozero {
|
|
|
|
namespace {
|
|
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
std::atomic<uint32_t> g_generation;
|
|
#endif
|
|
|
|
} // namespace
|
|
|
|
// Do NOT put any code in the constructor or use default initialization.
|
|
// Use the Reset() method below instead.
|
|
|
|
// This method is called to initialize both root and nested messages.
|
|
void Message::Reset(ScatteredStreamWriter* stream_writer, MessageArena* arena) {
|
|
// Older versions of libstdcxx don't have is_trivially_constructible.
|
|
#if !defined(__GLIBCXX__) || __GLIBCXX__ >= 20170516
|
|
static_assert(std::is_trivially_constructible<Message>::value,
|
|
"Message must be trivially constructible");
|
|
#endif
|
|
|
|
static_assert(std::is_trivially_destructible<Message>::value,
|
|
"Message must be trivially destructible");
|
|
stream_writer_ = stream_writer;
|
|
arena_ = arena;
|
|
size_ = 0;
|
|
size_field_ = nullptr;
|
|
size_already_written_ = 0;
|
|
nested_message_ = nullptr;
|
|
finalized_ = false;
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
handle_ = nullptr;
|
|
generation_ = g_generation.fetch_add(1, std::memory_order_relaxed);
|
|
#endif
|
|
}
|
|
|
|
void Message::AppendString(uint32_t field_id, const char* str) {
|
|
AppendBytes(field_id, str, strlen(str));
|
|
}
|
|
|
|
void Message::AppendBytes(uint32_t field_id, const void* src, size_t size) {
|
|
if (nested_message_)
|
|
EndNestedMessage();
|
|
|
|
PERFETTO_DCHECK(size < proto_utils::kMaxMessageLength);
|
|
// Write the proto preamble (field id, type and length of the field).
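// Illustrative example: for field_id == 1 the length-delimited tag is
// (1 << 3) | 2 == 0x0A, so appending the 3-byte string "abc" writes the
// preamble bytes 0x0A 0x03 followed by the payload.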
|
|
uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
|
|
uint8_t* pos = buffer;
|
|
pos = proto_utils::WriteVarInt(proto_utils::MakeTagLengthDelimited(field_id),
|
|
pos);
|
|
pos = proto_utils::WriteVarInt(static_cast<uint32_t>(size), pos);
|
|
WriteToStream(buffer, pos);
|
|
|
|
const uint8_t* src_u8 = reinterpret_cast<const uint8_t*>(src);
|
|
WriteToStream(src_u8, src_u8 + size);
|
|
}
|
|
|
|
size_t Message::AppendScatteredBytes(uint32_t field_id,
|
|
ContiguousMemoryRange* ranges,
|
|
size_t num_ranges) {
|
|
size_t size = 0;
|
|
for (size_t i = 0; i < num_ranges; ++i) {
|
|
size += ranges[i].size();
|
|
}
|
|
|
|
PERFETTO_DCHECK(size < proto_utils::kMaxMessageLength);
|
|
|
|
uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
|
|
uint8_t* pos = buffer;
|
|
pos = proto_utils::WriteVarInt(proto_utils::MakeTagLengthDelimited(field_id),
|
|
pos);
|
|
pos = proto_utils::WriteVarInt(static_cast<uint32_t>(size), pos);
|
|
WriteToStream(buffer, pos);
|
|
|
|
for (size_t i = 0; i < num_ranges; ++i) {
|
|
auto& range = ranges[i];
|
|
WriteToStream(range.begin, range.end);
|
|
}
|
|
|
|
return size;
|
|
}
|
|
|
|
uint32_t Message::Finalize() {
|
|
if (finalized_)
|
|
return size_;
|
|
|
|
if (nested_message_)
|
|
EndNestedMessage();
|
|
|
|
// Write the length of the nested message a posteriori, using a leading-zero
|
|
// redundant varint encoding.
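// Illustrative example: with a 4-byte reservation, a payload size of 5 is
// written as the redundant varint 0x85 0x80 0x80 0x00, which decodes to the
// same value as the minimal 1-byte encoding 0x05.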
|
|
if (size_field_) {
|
|
PERFETTO_DCHECK(!finalized_);
|
|
PERFETTO_DCHECK(size_ < proto_utils::kMaxMessageLength);
|
|
PERFETTO_DCHECK(size_ >= size_already_written_);
|
|
proto_utils::WriteRedundantVarInt(size_ - size_already_written_,
|
|
size_field_);
|
|
size_field_ = nullptr;
|
|
}
|
|
|
|
finalized_ = true;
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
if (handle_)
|
|
handle_->reset_message();
|
|
#endif
|
|
|
|
return size_;
|
|
}
|
|
|
|
Message* Message::BeginNestedMessageInternal(uint32_t field_id) {
|
|
if (nested_message_)
|
|
EndNestedMessage();
|
|
|
|
// Write the proto preamble for the nested message.
|
|
uint8_t data[proto_utils::kMaxTagEncodedSize];
|
|
uint8_t* data_end = proto_utils::WriteVarInt(
|
|
proto_utils::MakeTagLengthDelimited(field_id), data);
|
|
WriteToStream(data, data_end);
|
|
|
|
Message* message = arena_->NewMessage();
|
|
message->Reset(stream_writer_, arena_);
|
|
|
|
// The length of the nested message cannot be known upfront, so just reserve
// the bytes needed to encode the size once the nested message is finalized.
|
|
message->set_size_field(
|
|
stream_writer_->ReserveBytes(proto_utils::kMessageLengthFieldSize));
|
|
size_ += proto_utils::kMessageLengthFieldSize;
|
|
|
|
nested_message_ = message;
|
|
return message;
|
|
}
|
|
|
|
void Message::EndNestedMessage() {
|
|
size_ += nested_message_->Finalize();
|
|
arena_->DeleteLastMessage(nested_message_);
|
|
nested_message_ = nullptr;
|
|
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/message_arena.cc
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_arena.h"
|
|
|
|
#include <atomic>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
|
|
namespace protozero {
|
|
|
|
MessageArena::MessageArena() {
|
|
// The code below assumes that there is always at least one block.
|
|
blocks_.emplace_front();
|
|
static_assert(std::alignment_of<decltype(blocks_.back().storage[0])>::value >=
|
|
alignof(Message),
|
|
"MessageArea's storage is not properly aligned");
|
|
}
|
|
|
|
MessageArena::~MessageArena() = default;
|
|
|
|
Message* MessageArena::NewMessage() {
|
|
PERFETTO_DCHECK(!blocks_.empty()); // Should never become empty.
|
|
|
|
Block* block = &blocks_.back();
|
|
if (PERFETTO_UNLIKELY(block->entries >= Block::kCapacity)) {
|
|
blocks_.emplace_back();
|
|
block = &blocks_.back();
|
|
}
|
|
const auto idx = block->entries++;
|
|
void* storage = &block->storage[idx];
|
|
PERFETTO_ASAN_UNPOISON(storage, sizeof(Message));
|
|
return new (storage) Message();
|
|
}
|
|
|
|
void MessageArena::DeleteLastMessageInternal() {
|
|
PERFETTO_DCHECK(!blocks_.empty()); // Should never be empty, see below.
|
|
Block* block = &blocks_.back();
|
|
PERFETTO_DCHECK(block->entries > 0);
|
|
|
|
// This is the reason why there is no ~Message() call here.
|
|
// MessageArena::Reset() (see header) also relies on the dtor being trivial.
|
|
static_assert(std::is_trivially_destructible<Message>::value,
|
|
"Message must be trivially destructible");
|
|
|
|
--block->entries;
|
|
PERFETTO_ASAN_POISON(&block->storage[block->entries], sizeof(Message));
|
|
|
|
// Don't remove the first block, to avoid malloc/free calls when the root
// message is reset. Hitting the allocator every time would be a waste of time.
|
|
if (block->entries == 0 && blocks_.size() > 1) {
|
|
blocks_.pop_back();
|
|
}
|
|
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/message_handle.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
|
|
namespace protozero {
|
|
|
|
MessageHandleBase::MessageHandleBase(Message* message) : message_(message) {
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
generation_ = message_ ? message->generation_ : 0;
|
|
if (message_)
|
|
message_->set_handle(this);
|
|
#endif
|
|
}
|
|
|
|
MessageHandleBase::~MessageHandleBase() {
|
|
if (message_) {
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
PERFETTO_DCHECK(generation_ == message_->generation_);
|
|
#endif
|
|
FinalizeMessage();
|
|
}
|
|
}
|
|
|
|
MessageHandleBase::MessageHandleBase(MessageHandleBase&& other) noexcept {
|
|
Move(std::move(other));
|
|
}
|
|
|
|
MessageHandleBase& MessageHandleBase::operator=(MessageHandleBase&& other) {
|
|
// If the current handle was pointing to a message and is being reset to a new
|
|
// one, finalize the old message. However, if the other message is the same as
|
|
// the one we point to, don't finalize.
|
|
if (message_ && message_ != other.message_)
|
|
FinalizeMessage();
|
|
Move(std::move(other));
|
|
return *this;
|
|
}
|
|
|
|
void MessageHandleBase::Move(MessageHandleBase&& other) {
|
|
message_ = other.message_;
|
|
other.message_ = nullptr;
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
if (message_) {
|
|
generation_ = message_->generation_;
|
|
message_->set_handle(this);
|
|
}
|
|
#endif
|
|
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/packed_repeated_fields.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace protozero {
|
|
|
|
// static
|
|
constexpr size_t PackedBufferBase::kOnStackStorageSize;
|
|
|
|
void PackedBufferBase::GrowSlowpath() {
|
|
size_t write_off = static_cast<size_t>(write_ptr_ - storage_begin_);
|
|
size_t old_size = static_cast<size_t>(storage_end_ - storage_begin_);
|
|
size_t new_size = old_size < 65536 ? (old_size * 2) : (old_size * 3 / 2);
|
|
new_size = perfetto::base::AlignUp<4096>(new_size);
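// Illustrative growth example: 8192 -> 16384 (doubling below 64 KiB), while
// 131072 -> 196608 (x1.5 at or above 64 KiB); both are already multiples of
// 4096, so AlignUp leaves them unchanged.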
|
|
std::unique_ptr<uint8_t[]> new_buf(new uint8_t[new_size]);
|
|
memcpy(new_buf.get(), storage_begin_, old_size);
|
|
heap_buf_ = std::move(new_buf);
|
|
storage_begin_ = heap_buf_.get();
|
|
storage_end_ = storage_begin_ + new_size;
|
|
write_ptr_ = storage_begin_ + write_off;
|
|
}
|
|
|
|
void PackedBufferBase::Reset() {
|
|
heap_buf_.reset();
|
|
storage_begin_ = reinterpret_cast<uint8_t*>(&stack_buf_[0]);
|
|
storage_end_ = reinterpret_cast<uint8_t*>(&stack_buf_[kOnStackStorageSize]);
|
|
write_ptr_ = storage_begin_;
|
|
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/proto_decoder.cc
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
|
|
#include <string.h>
|
|
#include <limits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace protozero {
|
|
|
|
using namespace proto_utils;
|
|
|
|
#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
|
|
#error Unimplemented for big endian archs.
|
|
#endif
|
|
|
|
namespace {
|
|
|
|
struct ParseFieldResult {
|
|
enum ParseResult { kAbort, kSkip, kOk };
|
|
ParseResult parse_res;
|
|
const uint8_t* next;
|
|
Field field;
|
|
};
|
|
|
|
// Parses one field and returns the field itself and a pointer to the next
|
|
// field to parse. If parsing fails, the returned |next| == |buffer|.
|
|
PERFETTO_ALWAYS_INLINE ParseFieldResult
|
|
ParseOneField(const uint8_t* const buffer, const uint8_t* const end) {
|
|
ParseFieldResult res{ParseFieldResult::kAbort, buffer, Field{}};
|
|
|
|
// The first byte of a proto field is structured as follows:
// The 3 least significant bits determine the field type.
// The 5 most significant bits determine the field id. If the MSB == 1, the
// field id continues on the following bytes, using the VarInt encoding.
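// Illustrative example: a preamble byte of 0x22 (0b00100010) has wire type
// 0b010 == 2 (length-delimited) and field id 0b00100 == 4.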
|
|
const uint8_t kFieldTypeNumBits = 3;
|
|
const uint64_t kFieldTypeMask = (1 << kFieldTypeNumBits) - 1; // 0000 0111;
|
|
const uint8_t* pos = buffer;
|
|
|
|
// If we've already hit the end, just return an invalid field.
|
|
if (PERFETTO_UNLIKELY(pos >= end))
|
|
return res;
|
|
|
|
uint64_t preamble = 0;
|
|
if (PERFETTO_LIKELY(*pos < 0x80)) { // Fastpath for fields with ID < 16.
|
|
preamble = *(pos++);
|
|
} else {
|
|
const uint8_t* next = ParseVarInt(pos, end, &preamble);
|
|
if (PERFETTO_UNLIKELY(pos == next))
|
|
return res;
|
|
pos = next;
|
|
}
|
|
|
|
uint32_t field_id = static_cast<uint32_t>(preamble >> kFieldTypeNumBits);
|
|
if (field_id == 0 || pos >= end)
|
|
return res;
|
|
|
|
auto field_type = static_cast<uint8_t>(preamble & kFieldTypeMask);
|
|
const uint8_t* new_pos = pos;
|
|
uint64_t int_value = 0;
|
|
uint64_t size = 0;
|
|
|
|
switch (field_type) {
|
|
case static_cast<uint8_t>(ProtoWireType::kVarInt): {
|
|
new_pos = ParseVarInt(pos, end, &int_value);
|
|
|
|
// new_pos not being greater than pos means ParseVarInt could not fully parse
// the number because we ran out of space in the buffer. Return an invalid
// field, but don't update the offset, so a future read can retry this field.
|
|
if (PERFETTO_UNLIKELY(new_pos == pos))
|
|
return res;
|
|
|
|
break;
|
|
}
|
|
|
|
case static_cast<uint8_t>(ProtoWireType::kLengthDelimited): {
|
|
uint64_t payload_length;
|
|
new_pos = ParseVarInt(pos, end, &payload_length);
|
|
if (PERFETTO_UNLIKELY(new_pos == pos))
|
|
return res;
|
|
|
|
// ParseVarInt guarantees that |new_pos| <= |end| when it succeeds.
|
|
if (payload_length > static_cast<uint64_t>(end - new_pos))
|
|
return res;
|
|
|
|
const uintptr_t payload_start = reinterpret_cast<uintptr_t>(new_pos);
|
|
int_value = payload_start;
|
|
size = payload_length;
|
|
new_pos += payload_length;
|
|
break;
|
|
}
|
|
|
|
case static_cast<uint8_t>(ProtoWireType::kFixed64): {
|
|
new_pos = pos + sizeof(uint64_t);
|
|
if (PERFETTO_UNLIKELY(new_pos > end))
|
|
return res;
|
|
memcpy(&int_value, pos, sizeof(uint64_t));
|
|
break;
|
|
}
|
|
|
|
case static_cast<uint8_t>(ProtoWireType::kFixed32): {
|
|
new_pos = pos + sizeof(uint32_t);
|
|
if (PERFETTO_UNLIKELY(new_pos > end))
|
|
return res;
|
|
memcpy(&int_value, pos, sizeof(uint32_t));
|
|
break;
|
|
}
|
|
|
|
default:
|
|
PERFETTO_DLOG("Invalid proto field type: %u", field_type);
|
|
return res;
|
|
}
|
|
|
|
res.next = new_pos;
|
|
|
|
if (PERFETTO_UNLIKELY(field_id > std::numeric_limits<uint16_t>::max())) {
|
|
PERFETTO_DLOG("Skipping field %" PRIu32 " because its id > 0xFFFF",
|
|
field_id);
|
|
res.parse_res = ParseFieldResult::kSkip;
|
|
return res;
|
|
}
|
|
|
|
if (PERFETTO_UNLIKELY(size > proto_utils::kMaxMessageLength)) {
|
|
PERFETTO_DLOG("Skipping field %" PRIu32 " because it's too big (%" PRIu64
|
|
" KB)",
|
|
field_id, size / 1024);
|
|
res.parse_res = ParseFieldResult::kSkip;
|
|
return res;
|
|
}
|
|
|
|
res.parse_res = ParseFieldResult::kOk;
|
|
res.field.initialize(static_cast<uint16_t>(field_id), field_type, int_value,
|
|
static_cast<uint32_t>(size));
|
|
return res;
|
|
}
|
|
|
|
} // namespace
|
|
|
|
Field ProtoDecoder::FindField(uint32_t field_id) {
|
|
Field res{};
|
|
auto old_position = read_ptr_;
|
|
read_ptr_ = begin_;
|
|
for (auto f = ReadField(); f.valid(); f = ReadField()) {
|
|
if (f.id() == field_id) {
|
|
res = f;
|
|
break;
|
|
}
|
|
}
|
|
read_ptr_ = old_position;
|
|
return res;
|
|
}
|
|
|
|
PERFETTO_ALWAYS_INLINE
|
|
Field ProtoDecoder::ReadField() {
|
|
ParseFieldResult res;
|
|
do {
|
|
res = ParseOneField(read_ptr_, end_);
|
|
read_ptr_ = res.next;
|
|
} while (PERFETTO_UNLIKELY(res.parse_res == ParseFieldResult::kSkip));
|
|
return res.field;
|
|
}
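// A minimal usage sketch of ProtoDecoder (illustrative only, not part of the
// library; |raw|/|size| are the serialized message bytes and HandlePayload()
// is a made-up helper). It mirrors how the generated ParseFromArray()
// implementations later in this file drive the decoder:
//
//   ::protozero::ProtoDecoder dec(raw, size);
//   for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
//     if (field.id() == 1)  // e.g. a length-delimited submessage.
//       HandlePayload(field.as_std_string());
//   }
//
// FindField(id) is the alternative when only one field is of interest: it
// scans from the beginning and restores the read position before returning.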
|
|
|
|
void TypedProtoDecoderBase::ParseAllFields() {
|
|
const uint8_t* cur = begin_;
|
|
ParseFieldResult res;
|
|
for (;;) {
|
|
res = ParseOneField(cur, end_);
|
|
PERFETTO_DCHECK(res.parse_res != ParseFieldResult::kOk || res.next != cur);
|
|
cur = res.next;
|
|
if (PERFETTO_UNLIKELY(res.parse_res == ParseFieldResult::kSkip)) {
|
|
continue;
|
|
} else if (PERFETTO_UNLIKELY(res.parse_res == ParseFieldResult::kAbort)) {
|
|
break;
|
|
}
|
|
PERFETTO_DCHECK(res.parse_res == ParseFieldResult::kOk);
|
|
PERFETTO_DCHECK(res.field.valid());
|
|
auto field_id = res.field.id();
|
|
if (PERFETTO_UNLIKELY(field_id >= num_fields_))
|
|
continue;
|
|
|
|
Field* fld = &fields_[field_id];
|
|
if (PERFETTO_LIKELY(!fld->valid())) {
|
|
// This is the first time we see this field.
|
|
*fld = std::move(res.field);
|
|
} else {
|
|
// Repeated field case.
// In this case we need to:
// 1. Append the last value of the field to the end of the repeated field
//    storage.
// 2. Replace the default instance at offset |field_id| with the current
//    value. This is because, in the case of a repeated field, a call to
//    Get(X) is supposed to return the last value of X, not the first one.
// This is so that the RepeatedFieldIterator will iterate in the right
// order; see comments on RepeatedFieldIterator and the layout sketch after
// this function.
|
|
if (PERFETTO_UNLIKELY(size_ >= capacity_)) {
|
|
ExpandHeapStorage();
|
|
// ExpandHeapStorage moves fields_ so we need to update the ptr to fld:
|
|
fld = &fields_[field_id];
|
|
PERFETTO_DCHECK(size_ < capacity_);
|
|
}
|
|
fields_[size_++] = *fld;
|
|
*fld = std::move(res.field);
|
|
}
|
|
}
|
|
read_ptr_ = res.next;
|
|
}
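// Illustrative layout sketch (not part of the library, and assuming the
// storage initially holds one default-constructed Field per id): for a
// hypothetical decoder with num_fields_ == 4 and a repeated field id 2 seen
// three times with values A, B, C, ParseAllFields() leaves |fields_| as:
//
//   index:  0         1        2    3        4    5
//   value:  (unused)  (unset)  C    (unset)  A    B
//
// Slot 2 holds the last occurrence (so Get(2) returns C), while the earlier
// occurrences are appended at the end in parse order, which is what
// RepeatedFieldIterator relies on.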
|
|
|
|
void TypedProtoDecoderBase::ExpandHeapStorage() {
|
|
uint32_t new_capacity = capacity_ * 2;
|
|
PERFETTO_CHECK(new_capacity > size_);
|
|
std::unique_ptr<Field[]> new_storage(new Field[new_capacity]);
|
|
|
|
static_assert(std::is_trivially_copyable<Field>::value,
|
|
"Field must be trivially copyable");
|
|
memcpy(&new_storage[0], fields_, sizeof(Field) * size_);
|
|
|
|
heap_storage_ = std::move(new_storage);
|
|
fields_ = &heap_storage_[0];
|
|
capacity_ = new_capacity;
|
|
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/scattered_heap_buffer.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
|
|
#include <algorithm>
|
|
|
|
namespace protozero {
|
|
|
|
ScatteredHeapBuffer::Slice::Slice()
|
|
: buffer_(nullptr), size_(0u), unused_bytes_(0u) {}
|
|
|
|
ScatteredHeapBuffer::Slice::Slice(size_t size)
|
|
: buffer_(std::unique_ptr<uint8_t[]>(new uint8_t[size])),
|
|
size_(size),
|
|
unused_bytes_(size) {
|
|
PERFETTO_DCHECK(size);
|
|
Clear();
|
|
}
|
|
|
|
ScatteredHeapBuffer::Slice::Slice(Slice&& slice) noexcept = default;
|
|
|
|
ScatteredHeapBuffer::Slice::~Slice() = default;
|
|
|
|
ScatteredHeapBuffer::Slice& ScatteredHeapBuffer::Slice::operator=(Slice&&) =
|
|
default;
|
|
|
|
void ScatteredHeapBuffer::Slice::Clear() {
|
|
unused_bytes_ = size_;
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
memset(start(), 0xff, size_);
|
|
#endif // PERFETTO_DCHECK_IS_ON()
|
|
}
|
|
|
|
ScatteredHeapBuffer::ScatteredHeapBuffer(size_t initial_slice_size_bytes,
|
|
size_t maximum_slice_size_bytes)
|
|
: next_slice_size_(initial_slice_size_bytes),
|
|
maximum_slice_size_(maximum_slice_size_bytes) {
|
|
PERFETTO_DCHECK(next_slice_size_ && maximum_slice_size_);
|
|
PERFETTO_DCHECK(maximum_slice_size_ >= initial_slice_size_bytes);
|
|
}
|
|
|
|
ScatteredHeapBuffer::~ScatteredHeapBuffer() = default;
|
|
|
|
protozero::ContiguousMemoryRange ScatteredHeapBuffer::GetNewBuffer() {
|
|
PERFETTO_CHECK(writer_);
|
|
AdjustUsedSizeOfCurrentSlice();
|
|
|
|
if (cached_slice_.start()) {
|
|
slices_.push_back(std::move(cached_slice_));
|
|
PERFETTO_DCHECK(!cached_slice_.start());
|
|
} else {
|
|
slices_.emplace_back(next_slice_size_);
|
|
}
|
|
next_slice_size_ = std::min(maximum_slice_size_, next_slice_size_ * 2);
|
|
return slices_.back().GetTotalRange();
|
|
}
|
|
|
|
std::vector<uint8_t> ScatteredHeapBuffer::StitchSlices() {
|
|
AdjustUsedSizeOfCurrentSlice();
|
|
std::vector<uint8_t> buffer;
|
|
for (const auto& slice : slices_) {
|
|
auto used_range = slice.GetUsedRange();
|
|
buffer.insert(buffer.end(), used_range.begin, used_range.end);
|
|
}
|
|
return buffer;
|
|
}
|
|
|
|
std::vector<protozero::ContiguousMemoryRange> ScatteredHeapBuffer::GetRanges() {
|
|
AdjustUsedSizeOfCurrentSlice();
|
|
std::vector<protozero::ContiguousMemoryRange> ranges;
|
|
for (const auto& slice : slices_)
|
|
ranges.push_back(slice.GetUsedRange());
|
|
return ranges;
|
|
}
|
|
|
|
void ScatteredHeapBuffer::AdjustUsedSizeOfCurrentSlice() {
|
|
if (!slices_.empty())
|
|
slices_.back().set_unused_bytes(writer_->bytes_available());
|
|
}
|
|
|
|
size_t ScatteredHeapBuffer::GetTotalSize() {
|
|
size_t total_size = 0;
|
|
for (auto& slice : slices_) {
|
|
total_size += slice.size();
|
|
}
|
|
return total_size;
|
|
}
|
|
|
|
void ScatteredHeapBuffer::Reset() {
|
|
if (slices_.empty())
|
|
return;
|
|
cached_slice_ = std::move(slices_.front());
|
|
cached_slice_.Clear();
|
|
slices_.clear();
|
|
}
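// In practice ScatteredHeapBuffer is rarely used directly: the generated
// SerializeAsString()/SerializeAsArray() implementations later in this file
// go through the HeapBuffered<T> convenience wrapper, roughly:
//
//   ::protozero::HeapBuffered<::protozero::Message> msg;
//   msg.get()->AppendVarInt(/*field_id=*/1, 42u);
//   std::string bytes = msg.SerializeAsString();
//
// (Sketch only; the underlying slices grow geometrically via GetNewBuffer(),
// from the initial slice size up to the configured maximum, and are stitched
// together when serializing.)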
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/scattered_stream_null_delegate.cc
|
|
// gen_amalgamated begin header: include/perfetto/protozero/scattered_stream_null_delegate.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_NULL_DELEGATE_H_
|
|
#define INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_NULL_DELEGATE_H_
|
|
|
|
#include <memory>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/contiguous_memory_range.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
|
|
|
|
namespace protozero {
|
|
|
|
class PERFETTO_EXPORT ScatteredStreamWriterNullDelegate
|
|
: public ScatteredStreamWriter::Delegate {
|
|
public:
|
|
explicit ScatteredStreamWriterNullDelegate(size_t chunk_size);
|
|
~ScatteredStreamWriterNullDelegate() override;
|
|
|
|
// protozero::ScatteredStreamWriter::Delegate implementation.
|
|
ContiguousMemoryRange GetNewBuffer() override;
|
|
|
|
private:
|
|
const size_t chunk_size_;
|
|
std::unique_ptr<uint8_t[]> chunk_;
|
|
};
|
|
|
|
} // namespace protozero
|
|
|
|
#endif // INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_NULL_DELEGATE_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_null_delegate.h"
|
|
|
|
namespace protozero {
|
|
|
|
// An implementation of ScatteredStreamWriter::Delegate which always returns
// the same piece of memory.
// This is used when we need to no-op the writers (e.g. during teardown or in
// case of resource exhaustion), so that clients don't have to deal with
// nullptr checks.
|
|
ScatteredStreamWriterNullDelegate::ScatteredStreamWriterNullDelegate(
|
|
size_t chunk_size)
|
|
: chunk_size_(chunk_size),
|
|
chunk_(std::unique_ptr<uint8_t[]>(new uint8_t[chunk_size_])) {}
|
|
|
|
ScatteredStreamWriterNullDelegate::~ScatteredStreamWriterNullDelegate() {}
|
|
|
|
ContiguousMemoryRange ScatteredStreamWriterNullDelegate::GetNewBuffer() {
|
|
return {chunk_.get(), chunk_.get() + chunk_size_};
|
|
}
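// A minimal wiring sketch (illustrative only, not part of the library):
//
//   ScatteredStreamWriterNullDelegate null_delegate(4096);
//   ScatteredStreamWriter writer(&null_delegate);
//   writer.WriteBytesSlowPath(payload, payload_size);  // |payload|: any bytes.
//
// Every buffer request returns the same scratch chunk, so whatever is written
// is simply overwritten later and never observed.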
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/scattered_stream_writer.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
|
|
|
|
#include <algorithm>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace protozero {
|
|
|
|
ScatteredStreamWriter::Delegate::~Delegate() {}
|
|
|
|
ScatteredStreamWriter::ScatteredStreamWriter(Delegate* delegate)
|
|
: delegate_(delegate),
|
|
cur_range_({nullptr, nullptr}),
|
|
write_ptr_(nullptr) {}
|
|
|
|
ScatteredStreamWriter::~ScatteredStreamWriter() {}
|
|
|
|
void ScatteredStreamWriter::Reset(ContiguousMemoryRange range) {
|
|
written_previously_ += static_cast<uint64_t>(write_ptr_ - cur_range_.begin);
|
|
cur_range_ = range;
|
|
write_ptr_ = range.begin;
|
|
PERFETTO_DCHECK(!write_ptr_ || write_ptr_ < cur_range_.end);
|
|
}
|
|
|
|
void ScatteredStreamWriter::Extend() {
|
|
Reset(delegate_->GetNewBuffer());
|
|
}
|
|
|
|
void ScatteredStreamWriter::WriteBytesSlowPath(const uint8_t* src,
|
|
size_t size) {
|
|
size_t bytes_left = size;
|
|
while (bytes_left > 0) {
|
|
if (write_ptr_ >= cur_range_.end)
|
|
Extend();
|
|
const size_t burst_size = std::min(bytes_available(), bytes_left);
|
|
WriteBytesUnsafe(src, burst_size);
|
|
bytes_left -= burst_size;
|
|
src += burst_size;
|
|
}
|
|
}
|
|
|
|
// TODO(primiano): perf optimization: I suspect that at the end this will always
|
|
// be called with |size| == 4, in which case we might just hardcode it.
|
|
uint8_t* ScatteredStreamWriter::ReserveBytes(size_t size) {
|
|
if (write_ptr_ + size > cur_range_.end) {
|
|
// Assume the reservations are always < Delegate::GetNewBuffer().size(),
|
|
// so that one single call to Extend() will definitely give enough headroom.
|
|
Extend();
|
|
PERFETTO_DCHECK(write_ptr_ + size <= cur_range_.end);
|
|
}
|
|
uint8_t* begin = write_ptr_;
|
|
write_ptr_ += size;
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
memset(begin, 0, size);
|
|
#endif
|
|
return begin;
|
|
}
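// A rough usage sketch (an assumption about typical use, e.g. reserving room
// for a message length prefix; not a verbatim excerpt from the callers):
//
//   uint8_t* size_field = writer.ReserveBytes(4);
//   // ... write the nested payload through |writer| ...
//   // Once the payload size is known, the caller patches the 4 reserved
//   // bytes in place.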
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/static_buffer.cc
|
|
// gen_amalgamated begin header: include/perfetto/protozero/static_buffer.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_PROTOZERO_STATIC_BUFFER_H_
|
|
#define INCLUDE_PERFETTO_PROTOZERO_STATIC_BUFFER_H_
|
|
|
|
#include <memory>
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
|
|
|
|
namespace protozero {
|
|
|
|
class Message;
|
|
|
|
// A simple implementation of ScatteredStreamWriter::Delegate backed by a
// fixed-size buffer. It doesn't support expansion. The caller must never
// write more than the size of the buffer; doing so will hit a CHECK().
|
|
class PERFETTO_EXPORT StaticBufferDelegate
|
|
: public ScatteredStreamWriter::Delegate {
|
|
public:
|
|
StaticBufferDelegate(uint8_t* buf, size_t len) : range_{buf, buf + len} {}
|
|
~StaticBufferDelegate() override;
|
|
|
|
// ScatteredStreamWriter::Delegate implementation.
|
|
ContiguousMemoryRange GetNewBuffer() override;
|
|
|
|
ContiguousMemoryRange const range_;
|
|
bool get_new_buffer_called_once_ = false;
|
|
};
|
|
|
|
// Helper class to create protozero messages backed by a fixed-size buffer
// in one line. You can write:
//   protozero::StaticBuffered<protozero::MyMessage> msg(buf.data(), buf.size());
//   msg->set_stuff(...);
//   size_t bytes_encoded = msg.Finalize();
|
|
template <typename T /* protozero::Message */>
|
|
class StaticBuffered {
|
|
public:
|
|
StaticBuffered(void* buf, size_t len)
|
|
: delegate_(reinterpret_cast<uint8_t*>(buf), len), writer_(&delegate_) {
|
|
msg_.Reset(&writer_);
|
|
}
|
|
|
|
// This can be neither copied nor moved because Message hands out pointers
// to itself when creating submessages.
|
|
StaticBuffered(const StaticBuffered&) = delete;
|
|
StaticBuffered& operator=(const StaticBuffered&) = delete;
|
|
StaticBuffered(StaticBuffered&&) = delete;
|
|
StaticBuffered& operator=(StaticBuffered&&) = delete;
|
|
|
|
T* get() { return &msg_; }
|
|
T* operator->() { return &msg_; }
|
|
|
|
// The lack of a size() method is deliberate. It's to prevent one from
// accidentally calling size() before Finalize().
|
|
|
|
// Returns the number of encoded bytes (<= the size passed in the ctor).
|
|
size_t Finalize() {
|
|
msg_.Finalize();
|
|
return static_cast<size_t>(writer_.write_ptr() - delegate_.range_.begin);
|
|
}
|
|
|
|
private:
|
|
StaticBufferDelegate delegate_;
|
|
ScatteredStreamWriter writer_;
|
|
RootMessage<T> msg_;
|
|
};
|
|
|
|
// Helper class to create stack-based protozero messages in one line.
|
|
// You can write:
|
|
// protozero::StackBuffered<protozero::MyMessage, 16> msg;
|
|
// msg->set_stuff(...);
|
|
// size_t bytes_encoded = msg.Finalize();
|
|
template <typename T /* protozero::Message */, size_t N>
|
|
class StackBuffered : public StaticBuffered<T> {
|
|
public:
|
|
StackBuffered() : StaticBuffered<T>(&buf_[0], N) {}
|
|
|
|
private:
|
|
uint8_t buf_[N]; // Deliberately not initialized.
|
|
};
|
|
|
|
} // namespace protozero
|
|
|
|
#endif // INCLUDE_PERFETTO_PROTOZERO_STATIC_BUFFER_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/static_buffer.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace protozero {
|
|
|
|
StaticBufferDelegate::~StaticBufferDelegate() = default;
|
|
|
|
ContiguousMemoryRange StaticBufferDelegate::GetNewBuffer() {
|
|
if (get_new_buffer_called_once_) {
|
|
// This is the 2nd time GetNewBuffer is called. The estimate is wrong. We
|
|
// shouldn't try to grow the buffer after the initial call.
|
|
PERFETTO_FATAL("Static buffer too small");
|
|
}
|
|
get_new_buffer_called_once_ = true;
|
|
return range_;
|
|
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/virtual_destructors.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
|
|
namespace protozero {
|
|
|
|
CppMessageObj::~CppMessageObj() = default;
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/android_log_constants.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/android_log_constants.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_LOG_CONSTANTS_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_LOG_CONSTANTS_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum AndroidLogId : int;
|
|
enum AndroidLogPriority : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum AndroidLogId : int {
|
|
LID_DEFAULT = 0,
|
|
LID_RADIO = 1,
|
|
LID_EVENTS = 2,
|
|
LID_SYSTEM = 3,
|
|
LID_CRASH = 4,
|
|
LID_STATS = 5,
|
|
LID_SECURITY = 6,
|
|
LID_KERNEL = 7,
|
|
};
|
|
enum AndroidLogPriority : int {
|
|
PRIO_UNSPECIFIED = 0,
|
|
PRIO_UNUSED = 1,
|
|
PRIO_VERBOSE = 2,
|
|
PRIO_DEBUG = 3,
|
|
PRIO_INFO = 4,
|
|
PRIO_WARN = 5,
|
|
PRIO_ERROR = 6,
|
|
PRIO_FATAL = 7,
|
|
};
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_LOG_CONSTANTS_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/builtin_clock.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/builtin_clock.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum BuiltinClock : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum BuiltinClock : int {
|
|
BUILTIN_CLOCK_UNKNOWN = 0,
|
|
BUILTIN_CLOCK_REALTIME = 1,
|
|
BUILTIN_CLOCK_REALTIME_COARSE = 2,
|
|
BUILTIN_CLOCK_MONOTONIC = 3,
|
|
BUILTIN_CLOCK_MONOTONIC_COARSE = 4,
|
|
BUILTIN_CLOCK_MONOTONIC_RAW = 5,
|
|
BUILTIN_CLOCK_BOOTTIME = 6,
|
|
BUILTIN_CLOCK_MAX_ID = 63,
|
|
};
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/commit_data_request.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/commit_data_request.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_COMMIT_DATA_REQUEST_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_COMMIT_DATA_REQUEST_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class CommitDataRequest;
|
|
class CommitDataRequest_ChunkToPatch;
|
|
class CommitDataRequest_ChunkToPatch_Patch;
|
|
class CommitDataRequest_ChunksToMove;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT CommitDataRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
using ChunksToMove = CommitDataRequest_ChunksToMove;
|
|
using ChunkToPatch = CommitDataRequest_ChunkToPatch;
|
|
enum FieldNumbers {
|
|
kChunksToMoveFieldNumber = 1,
|
|
kChunksToPatchFieldNumber = 2,
|
|
kFlushRequestIdFieldNumber = 3,
|
|
};
|
|
|
|
CommitDataRequest();
|
|
~CommitDataRequest() override;
|
|
CommitDataRequest(CommitDataRequest&&) noexcept;
|
|
CommitDataRequest& operator=(CommitDataRequest&&);
|
|
CommitDataRequest(const CommitDataRequest&);
|
|
CommitDataRequest& operator=(const CommitDataRequest&);
|
|
bool operator==(const CommitDataRequest&) const;
|
|
bool operator!=(const CommitDataRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int chunks_to_move_size() const { return static_cast<int>(chunks_to_move_.size()); }
|
|
const std::vector<CommitDataRequest_ChunksToMove>& chunks_to_move() const { return chunks_to_move_; }
|
|
std::vector<CommitDataRequest_ChunksToMove>* mutable_chunks_to_move() { return &chunks_to_move_; }
|
|
void clear_chunks_to_move() { chunks_to_move_.clear(); }
|
|
CommitDataRequest_ChunksToMove* add_chunks_to_move() { chunks_to_move_.emplace_back(); return &chunks_to_move_.back(); }
|
|
|
|
int chunks_to_patch_size() const { return static_cast<int>(chunks_to_patch_.size()); }
|
|
const std::vector<CommitDataRequest_ChunkToPatch>& chunks_to_patch() const { return chunks_to_patch_; }
|
|
std::vector<CommitDataRequest_ChunkToPatch>* mutable_chunks_to_patch() { return &chunks_to_patch_; }
|
|
void clear_chunks_to_patch() { chunks_to_patch_.clear(); }
|
|
CommitDataRequest_ChunkToPatch* add_chunks_to_patch() { chunks_to_patch_.emplace_back(); return &chunks_to_patch_.back(); }
|
|
|
|
bool has_flush_request_id() const { return _has_field_[3]; }
|
|
uint64_t flush_request_id() const { return flush_request_id_; }
|
|
void set_flush_request_id(uint64_t value) { flush_request_id_ = value; _has_field_.set(3); }
|
|
|
|
private:
|
|
std::vector<CommitDataRequest_ChunksToMove> chunks_to_move_;
|
|
std::vector<CommitDataRequest_ChunkToPatch> chunks_to_patch_;
|
|
uint64_t flush_request_id_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<4> _has_field_{};
|
|
};
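// Example round-trip (illustrative only; not part of the generated code):
//
//   CommitDataRequest req;
//   auto* move = req.add_chunks_to_move();
//   move->set_page(1);
//   move->set_chunk(0);
//   move->set_target_buffer(2);
//   req.set_flush_request_id(42);
//   std::string bytes = req.SerializeAsString();
//
//   CommitDataRequest decoded;
//   bool ok = decoded.ParseFromArray(bytes.data(), bytes.size());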
|
|
|
|
|
|
class PERFETTO_EXPORT CommitDataRequest_ChunkToPatch : public ::protozero::CppMessageObj {
|
|
public:
|
|
using Patch = CommitDataRequest_ChunkToPatch_Patch;
|
|
enum FieldNumbers {
|
|
kTargetBufferFieldNumber = 1,
|
|
kWriterIdFieldNumber = 2,
|
|
kChunkIdFieldNumber = 3,
|
|
kPatchesFieldNumber = 4,
|
|
kHasMorePatchesFieldNumber = 5,
|
|
};
|
|
|
|
CommitDataRequest_ChunkToPatch();
|
|
~CommitDataRequest_ChunkToPatch() override;
|
|
CommitDataRequest_ChunkToPatch(CommitDataRequest_ChunkToPatch&&) noexcept;
|
|
CommitDataRequest_ChunkToPatch& operator=(CommitDataRequest_ChunkToPatch&&);
|
|
CommitDataRequest_ChunkToPatch(const CommitDataRequest_ChunkToPatch&);
|
|
CommitDataRequest_ChunkToPatch& operator=(const CommitDataRequest_ChunkToPatch&);
|
|
bool operator==(const CommitDataRequest_ChunkToPatch&) const;
|
|
bool operator!=(const CommitDataRequest_ChunkToPatch& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_target_buffer() const { return _has_field_[1]; }
|
|
uint32_t target_buffer() const { return target_buffer_; }
|
|
void set_target_buffer(uint32_t value) { target_buffer_ = value; _has_field_.set(1); }
|
|
|
|
bool has_writer_id() const { return _has_field_[2]; }
|
|
uint32_t writer_id() const { return writer_id_; }
|
|
void set_writer_id(uint32_t value) { writer_id_ = value; _has_field_.set(2); }
|
|
|
|
bool has_chunk_id() const { return _has_field_[3]; }
|
|
uint32_t chunk_id() const { return chunk_id_; }
|
|
void set_chunk_id(uint32_t value) { chunk_id_ = value; _has_field_.set(3); }
|
|
|
|
int patches_size() const { return static_cast<int>(patches_.size()); }
|
|
const std::vector<CommitDataRequest_ChunkToPatch_Patch>& patches() const { return patches_; }
|
|
std::vector<CommitDataRequest_ChunkToPatch_Patch>* mutable_patches() { return &patches_; }
|
|
void clear_patches() { patches_.clear(); }
|
|
CommitDataRequest_ChunkToPatch_Patch* add_patches() { patches_.emplace_back(); return &patches_.back(); }
|
|
|
|
bool has_has_more_patches() const { return _has_field_[5]; }
|
|
bool has_more_patches() const { return has_more_patches_; }
|
|
void set_has_more_patches(bool value) { has_more_patches_ = value; _has_field_.set(5); }
|
|
|
|
private:
|
|
uint32_t target_buffer_{};
|
|
uint32_t writer_id_{};
|
|
uint32_t chunk_id_{};
|
|
std::vector<CommitDataRequest_ChunkToPatch_Patch> patches_;
|
|
bool has_more_patches_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<6> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT CommitDataRequest_ChunkToPatch_Patch : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kOffsetFieldNumber = 1,
|
|
kDataFieldNumber = 2,
|
|
};
|
|
|
|
CommitDataRequest_ChunkToPatch_Patch();
|
|
~CommitDataRequest_ChunkToPatch_Patch() override;
|
|
CommitDataRequest_ChunkToPatch_Patch(CommitDataRequest_ChunkToPatch_Patch&&) noexcept;
|
|
CommitDataRequest_ChunkToPatch_Patch& operator=(CommitDataRequest_ChunkToPatch_Patch&&);
|
|
CommitDataRequest_ChunkToPatch_Patch(const CommitDataRequest_ChunkToPatch_Patch&);
|
|
CommitDataRequest_ChunkToPatch_Patch& operator=(const CommitDataRequest_ChunkToPatch_Patch&);
|
|
bool operator==(const CommitDataRequest_ChunkToPatch_Patch&) const;
|
|
bool operator!=(const CommitDataRequest_ChunkToPatch_Patch& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_offset() const { return _has_field_[1]; }
|
|
uint32_t offset() const { return offset_; }
|
|
void set_offset(uint32_t value) { offset_ = value; _has_field_.set(1); }
|
|
|
|
bool has_data() const { return _has_field_[2]; }
|
|
const std::string& data() const { return data_; }
|
|
void set_data(const std::string& value) { data_ = value; _has_field_.set(2); }
|
|
void set_data(const void* p, size_t s) { data_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(2); }
|
|
|
|
private:
|
|
uint32_t offset_{};
|
|
std::string data_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT CommitDataRequest_ChunksToMove : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kPageFieldNumber = 1,
|
|
kChunkFieldNumber = 2,
|
|
kTargetBufferFieldNumber = 3,
|
|
};
|
|
|
|
CommitDataRequest_ChunksToMove();
|
|
~CommitDataRequest_ChunksToMove() override;
|
|
CommitDataRequest_ChunksToMove(CommitDataRequest_ChunksToMove&&) noexcept;
|
|
CommitDataRequest_ChunksToMove& operator=(CommitDataRequest_ChunksToMove&&);
|
|
CommitDataRequest_ChunksToMove(const CommitDataRequest_ChunksToMove&);
|
|
CommitDataRequest_ChunksToMove& operator=(const CommitDataRequest_ChunksToMove&);
|
|
bool operator==(const CommitDataRequest_ChunksToMove&) const;
|
|
bool operator!=(const CommitDataRequest_ChunksToMove& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_page() const { return _has_field_[1]; }
|
|
uint32_t page() const { return page_; }
|
|
void set_page(uint32_t value) { page_ = value; _has_field_.set(1); }
|
|
|
|
bool has_chunk() const { return _has_field_[2]; }
|
|
uint32_t chunk() const { return chunk_; }
|
|
void set_chunk(uint32_t value) { chunk_ = value; _has_field_.set(2); }
|
|
|
|
bool has_target_buffer() const { return _has_field_[3]; }
|
|
uint32_t target_buffer() const { return target_buffer_; }
|
|
void set_target_buffer(uint32_t value) { target_buffer_ = value; _has_field_.set(3); }
|
|
|
|
private:
|
|
uint32_t page_{};
|
|
uint32_t chunk_{};
|
|
uint32_t target_buffer_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<4> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_COMMIT_DATA_REQUEST_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
CommitDataRequest::CommitDataRequest() = default;
|
|
CommitDataRequest::~CommitDataRequest() = default;
|
|
CommitDataRequest::CommitDataRequest(const CommitDataRequest&) = default;
|
|
CommitDataRequest& CommitDataRequest::operator=(const CommitDataRequest&) = default;
|
|
CommitDataRequest::CommitDataRequest(CommitDataRequest&&) noexcept = default;
|
|
CommitDataRequest& CommitDataRequest::operator=(CommitDataRequest&&) = default;
|
|
|
|
bool CommitDataRequest::operator==(const CommitDataRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& chunks_to_move_ == other.chunks_to_move_
|
|
&& chunks_to_patch_ == other.chunks_to_patch_
|
|
&& flush_request_id_ == other.flush_request_id_;
|
|
}
|
|
|
|
bool CommitDataRequest::ParseFromArray(const void* raw, size_t size) {
|
|
chunks_to_move_.clear();
|
|
chunks_to_patch_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* chunks_to_move */:
|
|
chunks_to_move_.emplace_back();
|
|
chunks_to_move_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 2 /* chunks_to_patch */:
|
|
chunks_to_patch_.emplace_back();
|
|
chunks_to_patch_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 3 /* flush_request_id */:
|
|
field.get(&flush_request_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string CommitDataRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> CommitDataRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void CommitDataRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: chunks_to_move
|
|
for (auto& it : chunks_to_move_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: chunks_to_patch
|
|
for (auto& it : chunks_to_patch_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
// Field 3: flush_request_id
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, flush_request_id_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
CommitDataRequest_ChunkToPatch::CommitDataRequest_ChunkToPatch() = default;
|
|
CommitDataRequest_ChunkToPatch::~CommitDataRequest_ChunkToPatch() = default;
|
|
CommitDataRequest_ChunkToPatch::CommitDataRequest_ChunkToPatch(const CommitDataRequest_ChunkToPatch&) = default;
|
|
CommitDataRequest_ChunkToPatch& CommitDataRequest_ChunkToPatch::operator=(const CommitDataRequest_ChunkToPatch&) = default;
|
|
CommitDataRequest_ChunkToPatch::CommitDataRequest_ChunkToPatch(CommitDataRequest_ChunkToPatch&&) noexcept = default;
|
|
CommitDataRequest_ChunkToPatch& CommitDataRequest_ChunkToPatch::operator=(CommitDataRequest_ChunkToPatch&&) = default;
|
|
|
|
bool CommitDataRequest_ChunkToPatch::operator==(const CommitDataRequest_ChunkToPatch& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& target_buffer_ == other.target_buffer_
|
|
&& writer_id_ == other.writer_id_
|
|
&& chunk_id_ == other.chunk_id_
|
|
&& patches_ == other.patches_
|
|
&& has_more_patches_ == other.has_more_patches_;
|
|
}
|
|
|
|
bool CommitDataRequest_ChunkToPatch::ParseFromArray(const void* raw, size_t size) {
|
|
patches_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* target_buffer */:
|
|
field.get(&target_buffer_);
|
|
break;
|
|
case 2 /* writer_id */:
|
|
field.get(&writer_id_);
|
|
break;
|
|
case 3 /* chunk_id */:
|
|
field.get(&chunk_id_);
|
|
break;
|
|
case 4 /* patches */:
|
|
patches_.emplace_back();
|
|
patches_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 5 /* has_more_patches */:
|
|
field.get(&has_more_patches_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string CommitDataRequest_ChunkToPatch::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> CommitDataRequest_ChunkToPatch::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void CommitDataRequest_ChunkToPatch::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: target_buffer
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, target_buffer_);
|
|
}
|
|
|
|
// Field 2: writer_id
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, writer_id_);
|
|
}
|
|
|
|
// Field 3: chunk_id
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, chunk_id_);
|
|
}
|
|
|
|
// Field 4: patches
|
|
for (auto& it : patches_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
|
|
}
|
|
|
|
// Field 5: has_more_patches
|
|
if (_has_field_[5]) {
|
|
msg->AppendTinyVarInt(5, has_more_patches_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
CommitDataRequest_ChunkToPatch_Patch::CommitDataRequest_ChunkToPatch_Patch() = default;
|
|
CommitDataRequest_ChunkToPatch_Patch::~CommitDataRequest_ChunkToPatch_Patch() = default;
|
|
CommitDataRequest_ChunkToPatch_Patch::CommitDataRequest_ChunkToPatch_Patch(const CommitDataRequest_ChunkToPatch_Patch&) = default;
|
|
CommitDataRequest_ChunkToPatch_Patch& CommitDataRequest_ChunkToPatch_Patch::operator=(const CommitDataRequest_ChunkToPatch_Patch&) = default;
|
|
CommitDataRequest_ChunkToPatch_Patch::CommitDataRequest_ChunkToPatch_Patch(CommitDataRequest_ChunkToPatch_Patch&&) noexcept = default;
|
|
CommitDataRequest_ChunkToPatch_Patch& CommitDataRequest_ChunkToPatch_Patch::operator=(CommitDataRequest_ChunkToPatch_Patch&&) = default;
|
|
|
|
bool CommitDataRequest_ChunkToPatch_Patch::operator==(const CommitDataRequest_ChunkToPatch_Patch& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& offset_ == other.offset_
|
|
&& data_ == other.data_;
|
|
}
|
|
|
|
bool CommitDataRequest_ChunkToPatch_Patch::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* offset */:
|
|
field.get(&offset_);
|
|
break;
|
|
case 2 /* data */:
|
|
field.get(&data_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string CommitDataRequest_ChunkToPatch_Patch::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> CommitDataRequest_ChunkToPatch_Patch::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void CommitDataRequest_ChunkToPatch_Patch::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: offset
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, offset_);
|
|
}
|
|
|
|
// Field 2: data
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, data_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
CommitDataRequest_ChunksToMove::CommitDataRequest_ChunksToMove() = default;
|
|
CommitDataRequest_ChunksToMove::~CommitDataRequest_ChunksToMove() = default;
|
|
CommitDataRequest_ChunksToMove::CommitDataRequest_ChunksToMove(const CommitDataRequest_ChunksToMove&) = default;
|
|
CommitDataRequest_ChunksToMove& CommitDataRequest_ChunksToMove::operator=(const CommitDataRequest_ChunksToMove&) = default;
|
|
CommitDataRequest_ChunksToMove::CommitDataRequest_ChunksToMove(CommitDataRequest_ChunksToMove&&) noexcept = default;
|
|
CommitDataRequest_ChunksToMove& CommitDataRequest_ChunksToMove::operator=(CommitDataRequest_ChunksToMove&&) = default;
|
|
|
|
bool CommitDataRequest_ChunksToMove::operator==(const CommitDataRequest_ChunksToMove& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& page_ == other.page_
|
|
&& chunk_ == other.chunk_
|
|
&& target_buffer_ == other.target_buffer_;
|
|
}
|
|
|
|
bool CommitDataRequest_ChunksToMove::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* page */:
|
|
field.get(&page_);
|
|
break;
|
|
case 2 /* chunk */:
|
|
field.get(&chunk_);
|
|
break;
|
|
case 3 /* target_buffer */:
|
|
field.get(&target_buffer_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string CommitDataRequest_ChunksToMove::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> CommitDataRequest_ChunksToMove::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void CommitDataRequest_ChunksToMove::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: page
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, page_);
|
|
}
|
|
|
|
// Field 2: chunk
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, chunk_);
|
|
}
|
|
|
|
// Field 3: target_buffer
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, target_buffer_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/data_source_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
DataSourceDescriptor::DataSourceDescriptor() = default;
|
|
DataSourceDescriptor::~DataSourceDescriptor() = default;
|
|
DataSourceDescriptor::DataSourceDescriptor(const DataSourceDescriptor&) = default;
|
|
DataSourceDescriptor& DataSourceDescriptor::operator=(const DataSourceDescriptor&) = default;
|
|
DataSourceDescriptor::DataSourceDescriptor(DataSourceDescriptor&&) noexcept = default;
|
|
DataSourceDescriptor& DataSourceDescriptor::operator=(DataSourceDescriptor&&) = default;
|
|
|
|
bool DataSourceDescriptor::operator==(const DataSourceDescriptor& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_ == other.name_
|
|
&& will_notify_on_stop_ == other.will_notify_on_stop_
|
|
&& will_notify_on_start_ == other.will_notify_on_start_
|
|
&& handles_incremental_state_clear_ == other.handles_incremental_state_clear_
|
|
&& gpu_counter_descriptor_ == other.gpu_counter_descriptor_
|
|
&& track_event_descriptor_ == other.track_event_descriptor_;
|
|
}
|
|
|
|
bool DataSourceDescriptor::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 2 /* will_notify_on_stop */:
|
|
field.get(&will_notify_on_stop_);
|
|
break;
|
|
case 3 /* will_notify_on_start */:
|
|
field.get(&will_notify_on_start_);
|
|
break;
|
|
case 4 /* handles_incremental_state_clear */:
|
|
field.get(&handles_incremental_state_clear_);
|
|
break;
|
|
case 5 /* gpu_counter_descriptor */:
|
|
gpu_counter_descriptor_ = field.as_std_string();
|
|
break;
|
|
case 6 /* track_event_descriptor */:
|
|
track_event_descriptor_ = field.as_std_string();
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DataSourceDescriptor::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DataSourceDescriptor::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DataSourceDescriptor::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, name_);
|
|
}
|
|
|
|
// Field 2: will_notify_on_stop
|
|
if (_has_field_[2]) {
|
|
msg->AppendTinyVarInt(2, will_notify_on_stop_);
|
|
}
|
|
|
|
// Field 3: will_notify_on_start
|
|
if (_has_field_[3]) {
|
|
msg->AppendTinyVarInt(3, will_notify_on_start_);
|
|
}
|
|
|
|
// Field 4: handles_incremental_state_clear
|
|
if (_has_field_[4]) {
|
|
msg->AppendTinyVarInt(4, handles_incremental_state_clear_);
|
|
}
|
|
|
|
// Field 5: gpu_counter_descriptor
|
|
if (_has_field_[5]) {
|
|
msg->AppendString(5, gpu_counter_descriptor_);
|
|
}
|
|
|
|
// Field 6: track_event_descriptor
|
|
if (_has_field_[6]) {
|
|
msg->AppendString(6, track_event_descriptor_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/descriptor.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/descriptor.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DESCRIPTOR_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DESCRIPTOR_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class OneofOptions;
|
|
class EnumValueDescriptorProto;
|
|
class EnumDescriptorProto;
|
|
class OneofDescriptorProto;
|
|
class FieldDescriptorProto;
|
|
class DescriptorProto;
|
|
class DescriptorProto_ReservedRange;
|
|
class FileDescriptorProto;
|
|
class FileDescriptorSet;
|
|
enum FieldDescriptorProto_Type : int;
|
|
enum FieldDescriptorProto_Label : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum FieldDescriptorProto_Type : int {
|
|
FieldDescriptorProto_Type_TYPE_DOUBLE = 1,
|
|
FieldDescriptorProto_Type_TYPE_FLOAT = 2,
|
|
FieldDescriptorProto_Type_TYPE_INT64 = 3,
|
|
FieldDescriptorProto_Type_TYPE_UINT64 = 4,
|
|
FieldDescriptorProto_Type_TYPE_INT32 = 5,
|
|
FieldDescriptorProto_Type_TYPE_FIXED64 = 6,
|
|
FieldDescriptorProto_Type_TYPE_FIXED32 = 7,
|
|
FieldDescriptorProto_Type_TYPE_BOOL = 8,
|
|
FieldDescriptorProto_Type_TYPE_STRING = 9,
|
|
FieldDescriptorProto_Type_TYPE_GROUP = 10,
|
|
FieldDescriptorProto_Type_TYPE_MESSAGE = 11,
|
|
FieldDescriptorProto_Type_TYPE_BYTES = 12,
|
|
FieldDescriptorProto_Type_TYPE_UINT32 = 13,
|
|
FieldDescriptorProto_Type_TYPE_ENUM = 14,
|
|
FieldDescriptorProto_Type_TYPE_SFIXED32 = 15,
|
|
FieldDescriptorProto_Type_TYPE_SFIXED64 = 16,
|
|
FieldDescriptorProto_Type_TYPE_SINT32 = 17,
|
|
FieldDescriptorProto_Type_TYPE_SINT64 = 18,
|
|
};
|
|
enum FieldDescriptorProto_Label : int {
|
|
FieldDescriptorProto_Label_LABEL_OPTIONAL = 1,
|
|
FieldDescriptorProto_Label_LABEL_REQUIRED = 2,
|
|
FieldDescriptorProto_Label_LABEL_REPEATED = 3,
|
|
};
|
|
|
|
class PERFETTO_EXPORT OneofOptions : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
OneofOptions();
|
|
~OneofOptions() override;
|
|
OneofOptions(OneofOptions&&) noexcept;
|
|
OneofOptions& operator=(OneofOptions&&);
|
|
OneofOptions(const OneofOptions&);
|
|
OneofOptions& operator=(const OneofOptions&);
|
|
bool operator==(const OneofOptions&) const;
|
|
bool operator!=(const OneofOptions& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT EnumValueDescriptorProto : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kNameFieldNumber = 1,
|
|
kNumberFieldNumber = 2,
|
|
};
|
|
|
|
EnumValueDescriptorProto();
|
|
~EnumValueDescriptorProto() override;
|
|
EnumValueDescriptorProto(EnumValueDescriptorProto&&) noexcept;
|
|
EnumValueDescriptorProto& operator=(EnumValueDescriptorProto&&);
|
|
EnumValueDescriptorProto(const EnumValueDescriptorProto&);
|
|
EnumValueDescriptorProto& operator=(const EnumValueDescriptorProto&);
|
|
bool operator==(const EnumValueDescriptorProto&) const;
|
|
bool operator!=(const EnumValueDescriptorProto& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_name() const { return _has_field_[1]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
|
|
|
|
bool has_number() const { return _has_field_[2]; }
|
|
int32_t number() const { return number_; }
|
|
void set_number(int32_t value) { number_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
std::string name_{};
|
|
int32_t number_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT EnumDescriptorProto : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kNameFieldNumber = 1,
|
|
kValueFieldNumber = 2,
|
|
kReservedNameFieldNumber = 5,
|
|
};
|
|
|
|
EnumDescriptorProto();
|
|
~EnumDescriptorProto() override;
|
|
EnumDescriptorProto(EnumDescriptorProto&&) noexcept;
|
|
EnumDescriptorProto& operator=(EnumDescriptorProto&&);
|
|
EnumDescriptorProto(const EnumDescriptorProto&);
|
|
EnumDescriptorProto& operator=(const EnumDescriptorProto&);
|
|
bool operator==(const EnumDescriptorProto&) const;
|
|
bool operator!=(const EnumDescriptorProto& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_name() const { return _has_field_[1]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
|
|
|
|
int value_size() const { return static_cast<int>(value_.size()); }
|
|
const std::vector<EnumValueDescriptorProto>& value() const { return value_; }
|
|
std::vector<EnumValueDescriptorProto>* mutable_value() { return &value_; }
|
|
void clear_value() { value_.clear(); }
|
|
EnumValueDescriptorProto* add_value() { value_.emplace_back(); return &value_.back(); }
|
|
|
|
int reserved_name_size() const { return static_cast<int>(reserved_name_.size()); }
|
|
const std::vector<std::string>& reserved_name() const { return reserved_name_; }
|
|
std::vector<std::string>* mutable_reserved_name() { return &reserved_name_; }
|
|
void clear_reserved_name() { reserved_name_.clear(); }
|
|
void add_reserved_name(std::string value) { reserved_name_.emplace_back(value); }
|
|
std::string* add_reserved_name() { reserved_name_.emplace_back(); return &reserved_name_.back(); }
|
|
|
|
private:
|
|
std::string name_{};
|
|
std::vector<EnumValueDescriptorProto> value_;
|
|
std::vector<std::string> reserved_name_;
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<6> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT OneofDescriptorProto : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kNameFieldNumber = 1,
|
|
kOptionsFieldNumber = 2,
|
|
};
|
|
|
|
OneofDescriptorProto();
|
|
~OneofDescriptorProto() override;
|
|
OneofDescriptorProto(OneofDescriptorProto&&) noexcept;
|
|
OneofDescriptorProto& operator=(OneofDescriptorProto&&);
|
|
OneofDescriptorProto(const OneofDescriptorProto&);
|
|
OneofDescriptorProto& operator=(const OneofDescriptorProto&);
|
|
bool operator==(const OneofDescriptorProto&) const;
|
|
bool operator!=(const OneofDescriptorProto& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_name() const { return _has_field_[1]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
|
|
|
|
bool has_options() const { return _has_field_[2]; }
|
|
const OneofOptions& options() const { return *options_; }
|
|
OneofOptions* mutable_options() { _has_field_.set(2); return options_.get(); }
|
|
|
|
private:
|
|
std::string name_{};
|
|
::protozero::CopyablePtr<OneofOptions> options_;
|
|
|
|
  // Allows unknown protobuf fields to be preserved for compatibility
  // with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT FieldDescriptorProto : public ::protozero::CppMessageObj {
|
|
public:
|
|
using Type = FieldDescriptorProto_Type;
|
|
static constexpr auto TYPE_DOUBLE = FieldDescriptorProto_Type_TYPE_DOUBLE;
|
|
static constexpr auto TYPE_FLOAT = FieldDescriptorProto_Type_TYPE_FLOAT;
|
|
static constexpr auto TYPE_INT64 = FieldDescriptorProto_Type_TYPE_INT64;
|
|
static constexpr auto TYPE_UINT64 = FieldDescriptorProto_Type_TYPE_UINT64;
|
|
static constexpr auto TYPE_INT32 = FieldDescriptorProto_Type_TYPE_INT32;
|
|
static constexpr auto TYPE_FIXED64 = FieldDescriptorProto_Type_TYPE_FIXED64;
|
|
static constexpr auto TYPE_FIXED32 = FieldDescriptorProto_Type_TYPE_FIXED32;
|
|
static constexpr auto TYPE_BOOL = FieldDescriptorProto_Type_TYPE_BOOL;
|
|
static constexpr auto TYPE_STRING = FieldDescriptorProto_Type_TYPE_STRING;
|
|
static constexpr auto TYPE_GROUP = FieldDescriptorProto_Type_TYPE_GROUP;
|
|
static constexpr auto TYPE_MESSAGE = FieldDescriptorProto_Type_TYPE_MESSAGE;
|
|
static constexpr auto TYPE_BYTES = FieldDescriptorProto_Type_TYPE_BYTES;
|
|
static constexpr auto TYPE_UINT32 = FieldDescriptorProto_Type_TYPE_UINT32;
|
|
static constexpr auto TYPE_ENUM = FieldDescriptorProto_Type_TYPE_ENUM;
|
|
static constexpr auto TYPE_SFIXED32 = FieldDescriptorProto_Type_TYPE_SFIXED32;
|
|
static constexpr auto TYPE_SFIXED64 = FieldDescriptorProto_Type_TYPE_SFIXED64;
|
|
static constexpr auto TYPE_SINT32 = FieldDescriptorProto_Type_TYPE_SINT32;
|
|
static constexpr auto TYPE_SINT64 = FieldDescriptorProto_Type_TYPE_SINT64;
|
|
static constexpr auto Type_MIN = FieldDescriptorProto_Type_TYPE_DOUBLE;
|
|
static constexpr auto Type_MAX = FieldDescriptorProto_Type_TYPE_SINT64;
|
|
using Label = FieldDescriptorProto_Label;
|
|
static constexpr auto LABEL_OPTIONAL = FieldDescriptorProto_Label_LABEL_OPTIONAL;
|
|
static constexpr auto LABEL_REQUIRED = FieldDescriptorProto_Label_LABEL_REQUIRED;
|
|
static constexpr auto LABEL_REPEATED = FieldDescriptorProto_Label_LABEL_REPEATED;
|
|
static constexpr auto Label_MIN = FieldDescriptorProto_Label_LABEL_OPTIONAL;
|
|
static constexpr auto Label_MAX = FieldDescriptorProto_Label_LABEL_REPEATED;
|
|
enum FieldNumbers {
|
|
kNameFieldNumber = 1,
|
|
kNumberFieldNumber = 3,
|
|
kLabelFieldNumber = 4,
|
|
kTypeFieldNumber = 5,
|
|
kTypeNameFieldNumber = 6,
|
|
kExtendeeFieldNumber = 2,
|
|
kDefaultValueFieldNumber = 7,
|
|
kOneofIndexFieldNumber = 9,
|
|
};
|
|
|
|
FieldDescriptorProto();
|
|
~FieldDescriptorProto() override;
|
|
FieldDescriptorProto(FieldDescriptorProto&&) noexcept;
|
|
FieldDescriptorProto& operator=(FieldDescriptorProto&&);
|
|
FieldDescriptorProto(const FieldDescriptorProto&);
|
|
FieldDescriptorProto& operator=(const FieldDescriptorProto&);
|
|
bool operator==(const FieldDescriptorProto&) const;
|
|
bool operator!=(const FieldDescriptorProto& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_name() const { return _has_field_[1]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
|
|
|
|
bool has_number() const { return _has_field_[3]; }
|
|
int32_t number() const { return number_; }
|
|
void set_number(int32_t value) { number_ = value; _has_field_.set(3); }
|
|
|
|
bool has_label() const { return _has_field_[4]; }
|
|
FieldDescriptorProto_Label label() const { return label_; }
|
|
void set_label(FieldDescriptorProto_Label value) { label_ = value; _has_field_.set(4); }
|
|
|
|
bool has_type() const { return _has_field_[5]; }
|
|
FieldDescriptorProto_Type type() const { return type_; }
|
|
void set_type(FieldDescriptorProto_Type value) { type_ = value; _has_field_.set(5); }
|
|
|
|
bool has_type_name() const { return _has_field_[6]; }
|
|
const std::string& type_name() const { return type_name_; }
|
|
void set_type_name(const std::string& value) { type_name_ = value; _has_field_.set(6); }
|
|
|
|
bool has_extendee() const { return _has_field_[2]; }
|
|
const std::string& extendee() const { return extendee_; }
|
|
void set_extendee(const std::string& value) { extendee_ = value; _has_field_.set(2); }
|
|
|
|
bool has_default_value() const { return _has_field_[7]; }
|
|
const std::string& default_value() const { return default_value_; }
|
|
void set_default_value(const std::string& value) { default_value_ = value; _has_field_.set(7); }
|
|
|
|
bool has_oneof_index() const { return _has_field_[9]; }
|
|
int32_t oneof_index() const { return oneof_index_; }
|
|
void set_oneof_index(int32_t value) { oneof_index_ = value; _has_field_.set(9); }
|
|
|
|
private:
|
|
std::string name_{};
|
|
int32_t number_{};
|
|
FieldDescriptorProto_Label label_{};
|
|
FieldDescriptorProto_Type type_{};
|
|
std::string type_name_{};
|
|
std::string extendee_{};
|
|
std::string default_value_{};
|
|
int32_t oneof_index_{};
|
|
|
|
  // Allows unknown protobuf fields to be preserved for compatibility
  // with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<10> _has_field_{};
|
|
};
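// Illustrative sketch, not part of the generated output: the Type/Label
// aliases above let callers spell enum values without the full
// FieldDescriptorProto_Type_* names. The field name and number below are
// arbitrary example values.
//
//   perfetto::protos::gen::FieldDescriptorProto field;
//   field.set_name("counter_id");
//   field.set_number(1);
//   field.set_type(perfetto::protos::gen::FieldDescriptorProto::TYPE_UINT32);
//   field.set_label(perfetto::protos::gen::FieldDescriptorProto::LABEL_OPTIONAL);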
|
|
|
|
|
|
class PERFETTO_EXPORT DescriptorProto : public ::protozero::CppMessageObj {
|
|
public:
|
|
using ReservedRange = DescriptorProto_ReservedRange;
|
|
enum FieldNumbers {
|
|
kNameFieldNumber = 1,
|
|
kFieldFieldNumber = 2,
|
|
kExtensionFieldNumber = 6,
|
|
kNestedTypeFieldNumber = 3,
|
|
kEnumTypeFieldNumber = 4,
|
|
kOneofDeclFieldNumber = 8,
|
|
kReservedRangeFieldNumber = 9,
|
|
kReservedNameFieldNumber = 10,
|
|
};
|
|
|
|
DescriptorProto();
|
|
~DescriptorProto() override;
|
|
DescriptorProto(DescriptorProto&&) noexcept;
|
|
DescriptorProto& operator=(DescriptorProto&&);
|
|
DescriptorProto(const DescriptorProto&);
|
|
DescriptorProto& operator=(const DescriptorProto&);
|
|
bool operator==(const DescriptorProto&) const;
|
|
bool operator!=(const DescriptorProto& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_name() const { return _has_field_[1]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
|
|
|
|
int field_size() const { return static_cast<int>(field_.size()); }
|
|
const std::vector<FieldDescriptorProto>& field() const { return field_; }
|
|
std::vector<FieldDescriptorProto>* mutable_field() { return &field_; }
|
|
void clear_field() { field_.clear(); }
|
|
FieldDescriptorProto* add_field() { field_.emplace_back(); return &field_.back(); }
|
|
|
|
int extension_size() const { return static_cast<int>(extension_.size()); }
|
|
const std::vector<FieldDescriptorProto>& extension() const { return extension_; }
|
|
std::vector<FieldDescriptorProto>* mutable_extension() { return &extension_; }
|
|
void clear_extension() { extension_.clear(); }
|
|
FieldDescriptorProto* add_extension() { extension_.emplace_back(); return &extension_.back(); }
|
|
|
|
int nested_type_size() const { return static_cast<int>(nested_type_.size()); }
|
|
const std::vector<DescriptorProto>& nested_type() const { return nested_type_; }
|
|
std::vector<DescriptorProto>* mutable_nested_type() { return &nested_type_; }
|
|
void clear_nested_type() { nested_type_.clear(); }
|
|
DescriptorProto* add_nested_type() { nested_type_.emplace_back(); return &nested_type_.back(); }
|
|
|
|
int enum_type_size() const { return static_cast<int>(enum_type_.size()); }
|
|
const std::vector<EnumDescriptorProto>& enum_type() const { return enum_type_; }
|
|
std::vector<EnumDescriptorProto>* mutable_enum_type() { return &enum_type_; }
|
|
void clear_enum_type() { enum_type_.clear(); }
|
|
EnumDescriptorProto* add_enum_type() { enum_type_.emplace_back(); return &enum_type_.back(); }
|
|
|
|
int oneof_decl_size() const { return static_cast<int>(oneof_decl_.size()); }
|
|
const std::vector<OneofDescriptorProto>& oneof_decl() const { return oneof_decl_; }
|
|
std::vector<OneofDescriptorProto>* mutable_oneof_decl() { return &oneof_decl_; }
|
|
void clear_oneof_decl() { oneof_decl_.clear(); }
|
|
OneofDescriptorProto* add_oneof_decl() { oneof_decl_.emplace_back(); return &oneof_decl_.back(); }
|
|
|
|
int reserved_range_size() const { return static_cast<int>(reserved_range_.size()); }
|
|
const std::vector<DescriptorProto_ReservedRange>& reserved_range() const { return reserved_range_; }
|
|
std::vector<DescriptorProto_ReservedRange>* mutable_reserved_range() { return &reserved_range_; }
|
|
void clear_reserved_range() { reserved_range_.clear(); }
|
|
DescriptorProto_ReservedRange* add_reserved_range() { reserved_range_.emplace_back(); return &reserved_range_.back(); }
|
|
|
|
int reserved_name_size() const { return static_cast<int>(reserved_name_.size()); }
|
|
const std::vector<std::string>& reserved_name() const { return reserved_name_; }
|
|
std::vector<std::string>* mutable_reserved_name() { return &reserved_name_; }
|
|
void clear_reserved_name() { reserved_name_.clear(); }
|
|
void add_reserved_name(std::string value) { reserved_name_.emplace_back(value); }
|
|
std::string* add_reserved_name() { reserved_name_.emplace_back(); return &reserved_name_.back(); }
|
|
|
|
private:
|
|
std::string name_{};
|
|
std::vector<FieldDescriptorProto> field_;
|
|
std::vector<FieldDescriptorProto> extension_;
|
|
std::vector<DescriptorProto> nested_type_;
|
|
std::vector<EnumDescriptorProto> enum_type_;
|
|
std::vector<OneofDescriptorProto> oneof_decl_;
|
|
std::vector<DescriptorProto_ReservedRange> reserved_range_;
|
|
std::vector<std::string> reserved_name_;
|
|
|
|
  // Allows unknown protobuf fields to be preserved for compatibility
  // with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<11> _has_field_{};
|
|
};
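// Illustrative sketch, not part of the generated output: repeated message
// fields are plain std::vector members, and each add_*() helper appends a
// default-constructed element and returns a pointer to it. Names and numbers
// below are example values only.
//
//   perfetto::protos::gen::DescriptorProto msg;
//   msg.set_name("TracePacket");
//   perfetto::protos::gen::FieldDescriptorProto* f = msg.add_field();
//   f->set_name("timestamp");
//   f->set_number(8);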
|
|
|
|
|
|
class PERFETTO_EXPORT DescriptorProto_ReservedRange : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kStartFieldNumber = 1,
|
|
kEndFieldNumber = 2,
|
|
};
|
|
|
|
DescriptorProto_ReservedRange();
|
|
~DescriptorProto_ReservedRange() override;
|
|
DescriptorProto_ReservedRange(DescriptorProto_ReservedRange&&) noexcept;
|
|
DescriptorProto_ReservedRange& operator=(DescriptorProto_ReservedRange&&);
|
|
DescriptorProto_ReservedRange(const DescriptorProto_ReservedRange&);
|
|
DescriptorProto_ReservedRange& operator=(const DescriptorProto_ReservedRange&);
|
|
bool operator==(const DescriptorProto_ReservedRange&) const;
|
|
bool operator!=(const DescriptorProto_ReservedRange& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_start() const { return _has_field_[1]; }
|
|
int32_t start() const { return start_; }
|
|
void set_start(int32_t value) { start_ = value; _has_field_.set(1); }
|
|
|
|
bool has_end() const { return _has_field_[2]; }
|
|
int32_t end() const { return end_; }
|
|
void set_end(int32_t value) { end_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
int32_t start_{};
|
|
int32_t end_{};
|
|
|
|
  // Allows unknown protobuf fields to be preserved for compatibility
  // with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT FileDescriptorProto : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kNameFieldNumber = 1,
|
|
kPackageFieldNumber = 2,
|
|
kDependencyFieldNumber = 3,
|
|
kPublicDependencyFieldNumber = 10,
|
|
kWeakDependencyFieldNumber = 11,
|
|
kMessageTypeFieldNumber = 4,
|
|
kEnumTypeFieldNumber = 5,
|
|
kExtensionFieldNumber = 7,
|
|
};
|
|
|
|
FileDescriptorProto();
|
|
~FileDescriptorProto() override;
|
|
FileDescriptorProto(FileDescriptorProto&&) noexcept;
|
|
FileDescriptorProto& operator=(FileDescriptorProto&&);
|
|
FileDescriptorProto(const FileDescriptorProto&);
|
|
FileDescriptorProto& operator=(const FileDescriptorProto&);
|
|
bool operator==(const FileDescriptorProto&) const;
|
|
bool operator!=(const FileDescriptorProto& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_name() const { return _has_field_[1]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
|
|
|
|
bool has_package() const { return _has_field_[2]; }
|
|
const std::string& package() const { return package_; }
|
|
void set_package(const std::string& value) { package_ = value; _has_field_.set(2); }
|
|
|
|
int dependency_size() const { return static_cast<int>(dependency_.size()); }
|
|
const std::vector<std::string>& dependency() const { return dependency_; }
|
|
std::vector<std::string>* mutable_dependency() { return &dependency_; }
|
|
void clear_dependency() { dependency_.clear(); }
|
|
void add_dependency(std::string value) { dependency_.emplace_back(value); }
|
|
std::string* add_dependency() { dependency_.emplace_back(); return &dependency_.back(); }
|
|
|
|
int public_dependency_size() const { return static_cast<int>(public_dependency_.size()); }
|
|
const std::vector<int32_t>& public_dependency() const { return public_dependency_; }
|
|
std::vector<int32_t>* mutable_public_dependency() { return &public_dependency_; }
|
|
void clear_public_dependency() { public_dependency_.clear(); }
|
|
void add_public_dependency(int32_t value) { public_dependency_.emplace_back(value); }
|
|
int32_t* add_public_dependency() { public_dependency_.emplace_back(); return &public_dependency_.back(); }
|
|
|
|
int weak_dependency_size() const { return static_cast<int>(weak_dependency_.size()); }
|
|
const std::vector<int32_t>& weak_dependency() const { return weak_dependency_; }
|
|
std::vector<int32_t>* mutable_weak_dependency() { return &weak_dependency_; }
|
|
void clear_weak_dependency() { weak_dependency_.clear(); }
|
|
void add_weak_dependency(int32_t value) { weak_dependency_.emplace_back(value); }
|
|
int32_t* add_weak_dependency() { weak_dependency_.emplace_back(); return &weak_dependency_.back(); }
|
|
|
|
int message_type_size() const { return static_cast<int>(message_type_.size()); }
|
|
const std::vector<DescriptorProto>& message_type() const { return message_type_; }
|
|
std::vector<DescriptorProto>* mutable_message_type() { return &message_type_; }
|
|
void clear_message_type() { message_type_.clear(); }
|
|
DescriptorProto* add_message_type() { message_type_.emplace_back(); return &message_type_.back(); }
|
|
|
|
int enum_type_size() const { return static_cast<int>(enum_type_.size()); }
|
|
const std::vector<EnumDescriptorProto>& enum_type() const { return enum_type_; }
|
|
std::vector<EnumDescriptorProto>* mutable_enum_type() { return &enum_type_; }
|
|
void clear_enum_type() { enum_type_.clear(); }
|
|
EnumDescriptorProto* add_enum_type() { enum_type_.emplace_back(); return &enum_type_.back(); }
|
|
|
|
int extension_size() const { return static_cast<int>(extension_.size()); }
|
|
const std::vector<FieldDescriptorProto>& extension() const { return extension_; }
|
|
std::vector<FieldDescriptorProto>* mutable_extension() { return &extension_; }
|
|
void clear_extension() { extension_.clear(); }
|
|
FieldDescriptorProto* add_extension() { extension_.emplace_back(); return &extension_.back(); }
|
|
|
|
private:
|
|
std::string name_{};
|
|
std::string package_{};
|
|
std::vector<std::string> dependency_;
|
|
std::vector<int32_t> public_dependency_;
|
|
std::vector<int32_t> weak_dependency_;
|
|
std::vector<DescriptorProto> message_type_;
|
|
std::vector<EnumDescriptorProto> enum_type_;
|
|
std::vector<FieldDescriptorProto> extension_;
|
|
|
|
  // Allows unknown protobuf fields to be preserved for compatibility
  // with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<12> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT FileDescriptorSet : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kFileFieldNumber = 1,
|
|
};
|
|
|
|
FileDescriptorSet();
|
|
~FileDescriptorSet() override;
|
|
FileDescriptorSet(FileDescriptorSet&&) noexcept;
|
|
FileDescriptorSet& operator=(FileDescriptorSet&&);
|
|
FileDescriptorSet(const FileDescriptorSet&);
|
|
FileDescriptorSet& operator=(const FileDescriptorSet&);
|
|
bool operator==(const FileDescriptorSet&) const;
|
|
bool operator!=(const FileDescriptorSet& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int file_size() const { return static_cast<int>(file_.size()); }
|
|
const std::vector<FileDescriptorProto>& file() const { return file_; }
|
|
std::vector<FileDescriptorProto>* mutable_file() { return &file_; }
|
|
void clear_file() { file_.clear(); }
|
|
FileDescriptorProto* add_file() { file_.emplace_back(); return &file_.back(); }
|
|
|
|
private:
|
|
std::vector<FileDescriptorProto> file_;
|
|
|
|
  // Allows unknown protobuf fields to be preserved for compatibility
  // with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DESCRIPTOR_PROTO_CPP_H_
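// Illustrative sketch, not part of the generated output: typical read-side use
// of the classes declared above. `descriptor_bytes` is a hypothetical
// std::vector<uint8_t> holding a serialized FileDescriptorSet payload, and
// printf assumes <cstdio>.
//
//   perfetto::protos::gen::FileDescriptorSet fds;
//   if (fds.ParseFromArray(descriptor_bytes.data(), descriptor_bytes.size())) {
//     for (const auto& file : fds.file()) {
//       for (const auto& msg : file.message_type())
//         printf("%s.%s\n", file.package().c_str(), msg.name().c_str());
//     }
//   }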
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
OneofOptions::OneofOptions() = default;
|
|
OneofOptions::~OneofOptions() = default;
|
|
OneofOptions::OneofOptions(const OneofOptions&) = default;
|
|
OneofOptions& OneofOptions::operator=(const OneofOptions&) = default;
|
|
OneofOptions::OneofOptions(OneofOptions&&) noexcept = default;
|
|
OneofOptions& OneofOptions::operator=(OneofOptions&&) = default;
|
|
|
|
bool OneofOptions::operator==(const OneofOptions& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool OneofOptions::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string OneofOptions::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> OneofOptions::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void OneofOptions::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
EnumValueDescriptorProto::EnumValueDescriptorProto() = default;
|
|
EnumValueDescriptorProto::~EnumValueDescriptorProto() = default;
|
|
EnumValueDescriptorProto::EnumValueDescriptorProto(const EnumValueDescriptorProto&) = default;
|
|
EnumValueDescriptorProto& EnumValueDescriptorProto::operator=(const EnumValueDescriptorProto&) = default;
|
|
EnumValueDescriptorProto::EnumValueDescriptorProto(EnumValueDescriptorProto&&) noexcept = default;
|
|
EnumValueDescriptorProto& EnumValueDescriptorProto::operator=(EnumValueDescriptorProto&&) = default;
|
|
|
|
bool EnumValueDescriptorProto::operator==(const EnumValueDescriptorProto& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_ == other.name_
|
|
&& number_ == other.number_;
|
|
}
|
|
|
|
bool EnumValueDescriptorProto::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 2 /* number */:
|
|
field.get(&number_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string EnumValueDescriptorProto::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> EnumValueDescriptorProto::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void EnumValueDescriptorProto::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, name_);
|
|
}
|
|
|
|
// Field 2: number
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, number_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
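// Illustrative round-trip sketch, not part of the generated output: the
// setters flip the matching bit in _has_field_, and Serialize() only emits
// fields whose bit is set, so an unset optional field costs zero bytes on the
// wire. Values below are example data only.
//
//   perfetto::protos::gen::EnumValueDescriptorProto v;
//   v.set_name("FOO");
//   v.set_number(1);
//   std::string bytes = v.SerializeAsString();
//   perfetto::protos::gen::EnumValueDescriptorProto parsed;
//   bool ok = parsed.ParseFromArray(bytes.data(), bytes.size());
//   // ok, parsed.has_name() and parsed.number() == 1 all hold here.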
|
|
|
|
|
|
EnumDescriptorProto::EnumDescriptorProto() = default;
|
|
EnumDescriptorProto::~EnumDescriptorProto() = default;
|
|
EnumDescriptorProto::EnumDescriptorProto(const EnumDescriptorProto&) = default;
|
|
EnumDescriptorProto& EnumDescriptorProto::operator=(const EnumDescriptorProto&) = default;
|
|
EnumDescriptorProto::EnumDescriptorProto(EnumDescriptorProto&&) noexcept = default;
|
|
EnumDescriptorProto& EnumDescriptorProto::operator=(EnumDescriptorProto&&) = default;
|
|
|
|
bool EnumDescriptorProto::operator==(const EnumDescriptorProto& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_ == other.name_
|
|
&& value_ == other.value_
|
|
&& reserved_name_ == other.reserved_name_;
|
|
}
|
|
|
|
bool EnumDescriptorProto::ParseFromArray(const void* raw, size_t size) {
|
|
value_.clear();
|
|
reserved_name_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 2 /* value */:
|
|
value_.emplace_back();
|
|
value_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 5 /* reserved_name */:
|
|
reserved_name_.emplace_back();
|
|
field.get(&reserved_name_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string EnumDescriptorProto::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> EnumDescriptorProto::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void EnumDescriptorProto::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, name_);
|
|
}
|
|
|
|
// Field 2: value
|
|
for (auto& it : value_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
// Field 5: reserved_name
|
|
for (auto& it : reserved_name_) {
|
|
msg->AppendString(5, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
OneofDescriptorProto::OneofDescriptorProto() = default;
|
|
OneofDescriptorProto::~OneofDescriptorProto() = default;
|
|
OneofDescriptorProto::OneofDescriptorProto(const OneofDescriptorProto&) = default;
|
|
OneofDescriptorProto& OneofDescriptorProto::operator=(const OneofDescriptorProto&) = default;
|
|
OneofDescriptorProto::OneofDescriptorProto(OneofDescriptorProto&&) noexcept = default;
|
|
OneofDescriptorProto& OneofDescriptorProto::operator=(OneofDescriptorProto&&) = default;
|
|
|
|
bool OneofDescriptorProto::operator==(const OneofDescriptorProto& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_ == other.name_
|
|
&& options_ == other.options_;
|
|
}
|
|
|
|
bool OneofDescriptorProto::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 2 /* options */:
|
|
(*options_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string OneofDescriptorProto::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> OneofDescriptorProto::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void OneofDescriptorProto::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, name_);
|
|
}
|
|
|
|
// Field 2: options
|
|
if (_has_field_[2]) {
|
|
(*options_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
FieldDescriptorProto::FieldDescriptorProto() = default;
|
|
FieldDescriptorProto::~FieldDescriptorProto() = default;
|
|
FieldDescriptorProto::FieldDescriptorProto(const FieldDescriptorProto&) = default;
|
|
FieldDescriptorProto& FieldDescriptorProto::operator=(const FieldDescriptorProto&) = default;
|
|
FieldDescriptorProto::FieldDescriptorProto(FieldDescriptorProto&&) noexcept = default;
|
|
FieldDescriptorProto& FieldDescriptorProto::operator=(FieldDescriptorProto&&) = default;
|
|
|
|
bool FieldDescriptorProto::operator==(const FieldDescriptorProto& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_ == other.name_
|
|
&& number_ == other.number_
|
|
&& label_ == other.label_
|
|
&& type_ == other.type_
|
|
&& type_name_ == other.type_name_
|
|
&& extendee_ == other.extendee_
|
|
&& default_value_ == other.default_value_
|
|
&& oneof_index_ == other.oneof_index_;
|
|
}
|
|
|
|
bool FieldDescriptorProto::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 3 /* number */:
|
|
field.get(&number_);
|
|
break;
|
|
case 4 /* label */:
|
|
field.get(&label_);
|
|
break;
|
|
case 5 /* type */:
|
|
field.get(&type_);
|
|
break;
|
|
case 6 /* type_name */:
|
|
field.get(&type_name_);
|
|
break;
|
|
case 2 /* extendee */:
|
|
field.get(&extendee_);
|
|
break;
|
|
case 7 /* default_value */:
|
|
field.get(&default_value_);
|
|
break;
|
|
case 9 /* oneof_index */:
|
|
field.get(&oneof_index_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FieldDescriptorProto::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FieldDescriptorProto::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FieldDescriptorProto::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, name_);
|
|
}
|
|
|
|
// Field 3: number
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, number_);
|
|
}
|
|
|
|
// Field 4: label
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, label_);
|
|
}
|
|
|
|
// Field 5: type
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, type_);
|
|
}
|
|
|
|
// Field 6: type_name
|
|
if (_has_field_[6]) {
|
|
msg->AppendString(6, type_name_);
|
|
}
|
|
|
|
// Field 2: extendee
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, extendee_);
|
|
}
|
|
|
|
// Field 7: default_value
|
|
if (_has_field_[7]) {
|
|
msg->AppendString(7, default_value_);
|
|
}
|
|
|
|
// Field 9: oneof_index
|
|
if (_has_field_[9]) {
|
|
msg->AppendVarInt(9, oneof_index_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
DescriptorProto::DescriptorProto() = default;
|
|
DescriptorProto::~DescriptorProto() = default;
|
|
DescriptorProto::DescriptorProto(const DescriptorProto&) = default;
|
|
DescriptorProto& DescriptorProto::operator=(const DescriptorProto&) = default;
|
|
DescriptorProto::DescriptorProto(DescriptorProto&&) noexcept = default;
|
|
DescriptorProto& DescriptorProto::operator=(DescriptorProto&&) = default;
|
|
|
|
bool DescriptorProto::operator==(const DescriptorProto& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_ == other.name_
|
|
&& field_ == other.field_
|
|
&& extension_ == other.extension_
|
|
&& nested_type_ == other.nested_type_
|
|
&& enum_type_ == other.enum_type_
|
|
&& oneof_decl_ == other.oneof_decl_
|
|
&& reserved_range_ == other.reserved_range_
|
|
&& reserved_name_ == other.reserved_name_;
|
|
}
|
|
|
|
bool DescriptorProto::ParseFromArray(const void* raw, size_t size) {
|
|
field_.clear();
|
|
extension_.clear();
|
|
nested_type_.clear();
|
|
enum_type_.clear();
|
|
oneof_decl_.clear();
|
|
reserved_range_.clear();
|
|
reserved_name_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 2 /* field */:
|
|
field_.emplace_back();
|
|
field_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 6 /* extension */:
|
|
extension_.emplace_back();
|
|
extension_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 3 /* nested_type */:
|
|
nested_type_.emplace_back();
|
|
nested_type_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 4 /* enum_type */:
|
|
enum_type_.emplace_back();
|
|
enum_type_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 8 /* oneof_decl */:
|
|
oneof_decl_.emplace_back();
|
|
oneof_decl_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 9 /* reserved_range */:
|
|
reserved_range_.emplace_back();
|
|
reserved_range_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 10 /* reserved_name */:
|
|
reserved_name_.emplace_back();
|
|
field.get(&reserved_name_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DescriptorProto::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DescriptorProto::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DescriptorProto::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, name_);
|
|
}
|
|
|
|
// Field 2: field
|
|
for (auto& it : field_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
// Field 6: extension
|
|
for (auto& it : extension_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
// Field 3: nested_type
|
|
for (auto& it : nested_type_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
|
|
}
|
|
|
|
// Field 4: enum_type
|
|
for (auto& it : enum_type_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
|
|
}
|
|
|
|
// Field 8: oneof_decl
|
|
for (auto& it : oneof_decl_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
|
|
}
|
|
|
|
// Field 9: reserved_range
|
|
for (auto& it : reserved_range_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(9));
|
|
}
|
|
|
|
// Field 10: reserved_name
|
|
for (auto& it : reserved_name_) {
|
|
msg->AppendString(10, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
DescriptorProto_ReservedRange::DescriptorProto_ReservedRange() = default;
|
|
DescriptorProto_ReservedRange::~DescriptorProto_ReservedRange() = default;
|
|
DescriptorProto_ReservedRange::DescriptorProto_ReservedRange(const DescriptorProto_ReservedRange&) = default;
|
|
DescriptorProto_ReservedRange& DescriptorProto_ReservedRange::operator=(const DescriptorProto_ReservedRange&) = default;
|
|
DescriptorProto_ReservedRange::DescriptorProto_ReservedRange(DescriptorProto_ReservedRange&&) noexcept = default;
|
|
DescriptorProto_ReservedRange& DescriptorProto_ReservedRange::operator=(DescriptorProto_ReservedRange&&) = default;
|
|
|
|
bool DescriptorProto_ReservedRange::operator==(const DescriptorProto_ReservedRange& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& start_ == other.start_
|
|
&& end_ == other.end_;
|
|
}
|
|
|
|
bool DescriptorProto_ReservedRange::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* start */:
|
|
field.get(&start_);
|
|
break;
|
|
case 2 /* end */:
|
|
field.get(&end_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DescriptorProto_ReservedRange::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DescriptorProto_ReservedRange::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DescriptorProto_ReservedRange::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: start
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, start_);
|
|
}
|
|
|
|
// Field 2: end
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, end_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
FileDescriptorProto::FileDescriptorProto() = default;
|
|
FileDescriptorProto::~FileDescriptorProto() = default;
|
|
FileDescriptorProto::FileDescriptorProto(const FileDescriptorProto&) = default;
|
|
FileDescriptorProto& FileDescriptorProto::operator=(const FileDescriptorProto&) = default;
|
|
FileDescriptorProto::FileDescriptorProto(FileDescriptorProto&&) noexcept = default;
|
|
FileDescriptorProto& FileDescriptorProto::operator=(FileDescriptorProto&&) = default;
|
|
|
|
bool FileDescriptorProto::operator==(const FileDescriptorProto& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_ == other.name_
|
|
&& package_ == other.package_
|
|
&& dependency_ == other.dependency_
|
|
&& public_dependency_ == other.public_dependency_
|
|
&& weak_dependency_ == other.weak_dependency_
|
|
&& message_type_ == other.message_type_
|
|
&& enum_type_ == other.enum_type_
|
|
&& extension_ == other.extension_;
|
|
}
|
|
|
|
bool FileDescriptorProto::ParseFromArray(const void* raw, size_t size) {
|
|
dependency_.clear();
|
|
public_dependency_.clear();
|
|
weak_dependency_.clear();
|
|
message_type_.clear();
|
|
enum_type_.clear();
|
|
extension_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 2 /* package */:
|
|
field.get(&package_);
|
|
break;
|
|
case 3 /* dependency */:
|
|
dependency_.emplace_back();
|
|
field.get(&dependency_.back());
|
|
break;
|
|
case 10 /* public_dependency */:
|
|
public_dependency_.emplace_back();
|
|
field.get(&public_dependency_.back());
|
|
break;
|
|
case 11 /* weak_dependency */:
|
|
weak_dependency_.emplace_back();
|
|
field.get(&weak_dependency_.back());
|
|
break;
|
|
case 4 /* message_type */:
|
|
message_type_.emplace_back();
|
|
message_type_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 5 /* enum_type */:
|
|
enum_type_.emplace_back();
|
|
enum_type_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 7 /* extension */:
|
|
extension_.emplace_back();
|
|
extension_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FileDescriptorProto::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FileDescriptorProto::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FileDescriptorProto::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, name_);
|
|
}
|
|
|
|
// Field 2: package
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, package_);
|
|
}
|
|
|
|
// Field 3: dependency
|
|
for (auto& it : dependency_) {
|
|
msg->AppendString(3, it);
|
|
}
|
|
|
|
// Field 10: public_dependency
|
|
for (auto& it : public_dependency_) {
|
|
msg->AppendVarInt(10, it);
|
|
}
|
|
|
|
// Field 11: weak_dependency
|
|
for (auto& it : weak_dependency_) {
|
|
msg->AppendVarInt(11, it);
|
|
}
|
|
|
|
// Field 4: message_type
|
|
for (auto& it : message_type_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
|
|
}
|
|
|
|
// Field 5: enum_type
|
|
for (auto& it : enum_type_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
|
|
}
|
|
|
|
// Field 7: extension
|
|
for (auto& it : extension_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
FileDescriptorSet::FileDescriptorSet() = default;
|
|
FileDescriptorSet::~FileDescriptorSet() = default;
|
|
FileDescriptorSet::FileDescriptorSet(const FileDescriptorSet&) = default;
|
|
FileDescriptorSet& FileDescriptorSet::operator=(const FileDescriptorSet&) = default;
|
|
FileDescriptorSet::FileDescriptorSet(FileDescriptorSet&&) noexcept = default;
|
|
FileDescriptorSet& FileDescriptorSet::operator=(FileDescriptorSet&&) = default;
|
|
|
|
bool FileDescriptorSet::operator==(const FileDescriptorSet& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& file_ == other.file_;
|
|
}
|
|
|
|
bool FileDescriptorSet::ParseFromArray(const void* raw, size_t size) {
|
|
file_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* file */:
|
|
file_.emplace_back();
|
|
file_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FileDescriptorSet::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FileDescriptorSet::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FileDescriptorSet::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: file
|
|
for (auto& it : file_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/gpu_counter_descriptor.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/gpu_counter_descriptor.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_GPU_COUNTER_DESCRIPTOR_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_GPU_COUNTER_DESCRIPTOR_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class GpuCounterDescriptor;
|
|
class GpuCounterDescriptor_GpuCounterBlock;
|
|
class GpuCounterDescriptor_GpuCounterSpec;
|
|
enum GpuCounterDescriptor_GpuCounterGroup : int;
|
|
enum GpuCounterDescriptor_MeasureUnit : int;
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum GpuCounterDescriptor_GpuCounterGroup : int {
|
|
GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED = 0,
|
|
GpuCounterDescriptor_GpuCounterGroup_SYSTEM = 1,
|
|
GpuCounterDescriptor_GpuCounterGroup_VERTICES = 2,
|
|
GpuCounterDescriptor_GpuCounterGroup_FRAGMENTS = 3,
|
|
GpuCounterDescriptor_GpuCounterGroup_PRIMITIVES = 4,
|
|
GpuCounterDescriptor_GpuCounterGroup_MEMORY = 5,
|
|
GpuCounterDescriptor_GpuCounterGroup_COMPUTE = 6,
|
|
};
|
|
enum GpuCounterDescriptor_MeasureUnit : int {
|
|
GpuCounterDescriptor_MeasureUnit_NONE = 0,
|
|
GpuCounterDescriptor_MeasureUnit_BIT = 1,
|
|
GpuCounterDescriptor_MeasureUnit_KILOBIT = 2,
|
|
GpuCounterDescriptor_MeasureUnit_MEGABIT = 3,
|
|
GpuCounterDescriptor_MeasureUnit_GIGABIT = 4,
|
|
GpuCounterDescriptor_MeasureUnit_TERABIT = 5,
|
|
GpuCounterDescriptor_MeasureUnit_PETABIT = 6,
|
|
GpuCounterDescriptor_MeasureUnit_BYTE = 7,
|
|
GpuCounterDescriptor_MeasureUnit_KILOBYTE = 8,
|
|
GpuCounterDescriptor_MeasureUnit_MEGABYTE = 9,
|
|
GpuCounterDescriptor_MeasureUnit_GIGABYTE = 10,
|
|
GpuCounterDescriptor_MeasureUnit_TERABYTE = 11,
|
|
GpuCounterDescriptor_MeasureUnit_PETABYTE = 12,
|
|
GpuCounterDescriptor_MeasureUnit_HERTZ = 13,
|
|
GpuCounterDescriptor_MeasureUnit_KILOHERTZ = 14,
|
|
GpuCounterDescriptor_MeasureUnit_MEGAHERTZ = 15,
|
|
GpuCounterDescriptor_MeasureUnit_GIGAHERTZ = 16,
|
|
GpuCounterDescriptor_MeasureUnit_TERAHERTZ = 17,
|
|
GpuCounterDescriptor_MeasureUnit_PETAHERTZ = 18,
|
|
GpuCounterDescriptor_MeasureUnit_NANOSECOND = 19,
|
|
GpuCounterDescriptor_MeasureUnit_MICROSECOND = 20,
|
|
GpuCounterDescriptor_MeasureUnit_MILLISECOND = 21,
|
|
GpuCounterDescriptor_MeasureUnit_SECOND = 22,
|
|
GpuCounterDescriptor_MeasureUnit_MINUTE = 23,
|
|
GpuCounterDescriptor_MeasureUnit_HOUR = 24,
|
|
GpuCounterDescriptor_MeasureUnit_VERTEX = 25,
|
|
GpuCounterDescriptor_MeasureUnit_PIXEL = 26,
|
|
GpuCounterDescriptor_MeasureUnit_TRIANGLE = 27,
|
|
GpuCounterDescriptor_MeasureUnit_PRIMITIVE = 38,
|
|
GpuCounterDescriptor_MeasureUnit_FRAGMENT = 39,
|
|
GpuCounterDescriptor_MeasureUnit_MILLIWATT = 28,
|
|
GpuCounterDescriptor_MeasureUnit_WATT = 29,
|
|
GpuCounterDescriptor_MeasureUnit_KILOWATT = 30,
|
|
GpuCounterDescriptor_MeasureUnit_JOULE = 31,
|
|
GpuCounterDescriptor_MeasureUnit_VOLT = 32,
|
|
GpuCounterDescriptor_MeasureUnit_AMPERE = 33,
|
|
GpuCounterDescriptor_MeasureUnit_CELSIUS = 34,
|
|
GpuCounterDescriptor_MeasureUnit_FAHRENHEIT = 35,
|
|
GpuCounterDescriptor_MeasureUnit_KELVIN = 36,
|
|
GpuCounterDescriptor_MeasureUnit_PERCENT = 37,
|
|
GpuCounterDescriptor_MeasureUnit_INSTRUCTION = 40,
|
|
};
|
|
|
|
class PERFETTO_EXPORT GpuCounterDescriptor : public ::protozero::CppMessageObj {
|
|
public:
|
|
using GpuCounterSpec = GpuCounterDescriptor_GpuCounterSpec;
|
|
using GpuCounterBlock = GpuCounterDescriptor_GpuCounterBlock;
|
|
using GpuCounterGroup = GpuCounterDescriptor_GpuCounterGroup;
|
|
static constexpr auto UNCLASSIFIED = GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED;
|
|
static constexpr auto SYSTEM = GpuCounterDescriptor_GpuCounterGroup_SYSTEM;
|
|
static constexpr auto VERTICES = GpuCounterDescriptor_GpuCounterGroup_VERTICES;
|
|
static constexpr auto FRAGMENTS = GpuCounterDescriptor_GpuCounterGroup_FRAGMENTS;
|
|
static constexpr auto PRIMITIVES = GpuCounterDescriptor_GpuCounterGroup_PRIMITIVES;
|
|
static constexpr auto MEMORY = GpuCounterDescriptor_GpuCounterGroup_MEMORY;
|
|
static constexpr auto COMPUTE = GpuCounterDescriptor_GpuCounterGroup_COMPUTE;
|
|
static constexpr auto GpuCounterGroup_MIN = GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED;
|
|
static constexpr auto GpuCounterGroup_MAX = GpuCounterDescriptor_GpuCounterGroup_COMPUTE;
|
|
using MeasureUnit = GpuCounterDescriptor_MeasureUnit;
|
|
static constexpr auto NONE = GpuCounterDescriptor_MeasureUnit_NONE;
|
|
static constexpr auto BIT = GpuCounterDescriptor_MeasureUnit_BIT;
|
|
static constexpr auto KILOBIT = GpuCounterDescriptor_MeasureUnit_KILOBIT;
|
|
static constexpr auto MEGABIT = GpuCounterDescriptor_MeasureUnit_MEGABIT;
|
|
static constexpr auto GIGABIT = GpuCounterDescriptor_MeasureUnit_GIGABIT;
|
|
static constexpr auto TERABIT = GpuCounterDescriptor_MeasureUnit_TERABIT;
|
|
static constexpr auto PETABIT = GpuCounterDescriptor_MeasureUnit_PETABIT;
|
|
static constexpr auto BYTE = GpuCounterDescriptor_MeasureUnit_BYTE;
|
|
static constexpr auto KILOBYTE = GpuCounterDescriptor_MeasureUnit_KILOBYTE;
|
|
static constexpr auto MEGABYTE = GpuCounterDescriptor_MeasureUnit_MEGABYTE;
|
|
static constexpr auto GIGABYTE = GpuCounterDescriptor_MeasureUnit_GIGABYTE;
|
|
static constexpr auto TERABYTE = GpuCounterDescriptor_MeasureUnit_TERABYTE;
|
|
static constexpr auto PETABYTE = GpuCounterDescriptor_MeasureUnit_PETABYTE;
|
|
static constexpr auto HERTZ = GpuCounterDescriptor_MeasureUnit_HERTZ;
|
|
static constexpr auto KILOHERTZ = GpuCounterDescriptor_MeasureUnit_KILOHERTZ;
|
|
static constexpr auto MEGAHERTZ = GpuCounterDescriptor_MeasureUnit_MEGAHERTZ;
|
|
static constexpr auto GIGAHERTZ = GpuCounterDescriptor_MeasureUnit_GIGAHERTZ;
|
|
static constexpr auto TERAHERTZ = GpuCounterDescriptor_MeasureUnit_TERAHERTZ;
|
|
static constexpr auto PETAHERTZ = GpuCounterDescriptor_MeasureUnit_PETAHERTZ;
|
|
static constexpr auto NANOSECOND = GpuCounterDescriptor_MeasureUnit_NANOSECOND;
|
|
static constexpr auto MICROSECOND = GpuCounterDescriptor_MeasureUnit_MICROSECOND;
|
|
static constexpr auto MILLISECOND = GpuCounterDescriptor_MeasureUnit_MILLISECOND;
|
|
static constexpr auto SECOND = GpuCounterDescriptor_MeasureUnit_SECOND;
|
|
static constexpr auto MINUTE = GpuCounterDescriptor_MeasureUnit_MINUTE;
|
|
static constexpr auto HOUR = GpuCounterDescriptor_MeasureUnit_HOUR;
|
|
static constexpr auto VERTEX = GpuCounterDescriptor_MeasureUnit_VERTEX;
|
|
static constexpr auto PIXEL = GpuCounterDescriptor_MeasureUnit_PIXEL;
|
|
static constexpr auto TRIANGLE = GpuCounterDescriptor_MeasureUnit_TRIANGLE;
|
|
static constexpr auto PRIMITIVE = GpuCounterDescriptor_MeasureUnit_PRIMITIVE;
|
|
static constexpr auto FRAGMENT = GpuCounterDescriptor_MeasureUnit_FRAGMENT;
|
|
static constexpr auto MILLIWATT = GpuCounterDescriptor_MeasureUnit_MILLIWATT;
|
|
static constexpr auto WATT = GpuCounterDescriptor_MeasureUnit_WATT;
|
|
static constexpr auto KILOWATT = GpuCounterDescriptor_MeasureUnit_KILOWATT;
|
|
static constexpr auto JOULE = GpuCounterDescriptor_MeasureUnit_JOULE;
|
|
static constexpr auto VOLT = GpuCounterDescriptor_MeasureUnit_VOLT;
|
|
static constexpr auto AMPERE = GpuCounterDescriptor_MeasureUnit_AMPERE;
|
|
static constexpr auto CELSIUS = GpuCounterDescriptor_MeasureUnit_CELSIUS;
|
|
static constexpr auto FAHRENHEIT = GpuCounterDescriptor_MeasureUnit_FAHRENHEIT;
|
|
static constexpr auto KELVIN = GpuCounterDescriptor_MeasureUnit_KELVIN;
|
|
static constexpr auto PERCENT = GpuCounterDescriptor_MeasureUnit_PERCENT;
|
|
static constexpr auto INSTRUCTION = GpuCounterDescriptor_MeasureUnit_INSTRUCTION;
|
|
static constexpr auto MeasureUnit_MIN = GpuCounterDescriptor_MeasureUnit_NONE;
|
|
static constexpr auto MeasureUnit_MAX = GpuCounterDescriptor_MeasureUnit_INSTRUCTION;
|
|
enum FieldNumbers {
|
|
kSpecsFieldNumber = 1,
|
|
kBlocksFieldNumber = 2,
|
|
kMinSamplingPeriodNsFieldNumber = 3,
|
|
kMaxSamplingPeriodNsFieldNumber = 4,
|
|
kSupportsInstrumentedSamplingFieldNumber = 5,
|
|
};
|
|
|
|
GpuCounterDescriptor();
|
|
~GpuCounterDescriptor() override;
|
|
GpuCounterDescriptor(GpuCounterDescriptor&&) noexcept;
|
|
GpuCounterDescriptor& operator=(GpuCounterDescriptor&&);
|
|
GpuCounterDescriptor(const GpuCounterDescriptor&);
|
|
GpuCounterDescriptor& operator=(const GpuCounterDescriptor&);
|
|
bool operator==(const GpuCounterDescriptor&) const;
|
|
bool operator!=(const GpuCounterDescriptor& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int specs_size() const { return static_cast<int>(specs_.size()); }
|
|
const std::vector<GpuCounterDescriptor_GpuCounterSpec>& specs() const { return specs_; }
|
|
std::vector<GpuCounterDescriptor_GpuCounterSpec>* mutable_specs() { return &specs_; }
|
|
void clear_specs() { specs_.clear(); }
|
|
GpuCounterDescriptor_GpuCounterSpec* add_specs() { specs_.emplace_back(); return &specs_.back(); }
|
|
|
|
int blocks_size() const { return static_cast<int>(blocks_.size()); }
|
|
const std::vector<GpuCounterDescriptor_GpuCounterBlock>& blocks() const { return blocks_; }
|
|
std::vector<GpuCounterDescriptor_GpuCounterBlock>* mutable_blocks() { return &blocks_; }
|
|
void clear_blocks() { blocks_.clear(); }
|
|
GpuCounterDescriptor_GpuCounterBlock* add_blocks() { blocks_.emplace_back(); return &blocks_.back(); }
|
|
|
|
bool has_min_sampling_period_ns() const { return _has_field_[3]; }
|
|
uint64_t min_sampling_period_ns() const { return min_sampling_period_ns_; }
|
|
void set_min_sampling_period_ns(uint64_t value) { min_sampling_period_ns_ = value; _has_field_.set(3); }
|
|
|
|
bool has_max_sampling_period_ns() const { return _has_field_[4]; }
|
|
uint64_t max_sampling_period_ns() const { return max_sampling_period_ns_; }
|
|
void set_max_sampling_period_ns(uint64_t value) { max_sampling_period_ns_ = value; _has_field_.set(4); }
|
|
|
|
bool has_supports_instrumented_sampling() const { return _has_field_[5]; }
|
|
bool supports_instrumented_sampling() const { return supports_instrumented_sampling_; }
|
|
void set_supports_instrumented_sampling(bool value) { supports_instrumented_sampling_ = value; _has_field_.set(5); }
|
|
|
|
private:
|
|
std::vector<GpuCounterDescriptor_GpuCounterSpec> specs_;
|
|
std::vector<GpuCounterDescriptor_GpuCounterBlock> blocks_;
|
|
uint64_t min_sampling_period_ns_{};
|
|
uint64_t max_sampling_period_ns_{};
|
|
bool supports_instrumented_sampling_{};
|
|
|
|
  // Allows unknown protobuf fields to be preserved for compatibility
  // with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<6> _has_field_{};
|
|
};
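// Illustrative sketch, not part of the generated output: describing a single
// GPU counter. add_specs() appends a GpuCounterSpec in place and returns a
// pointer to it; the counter id, name and sampling period below are made-up
// example values.
//
//   perfetto::protos::gen::GpuCounterDescriptor desc;
//   perfetto::protos::gen::GpuCounterDescriptor::GpuCounterSpec* spec =
//       desc.add_specs();
//   spec->set_counter_id(1);
//   spec->set_name("gpu_utilization");
//   spec->set_description("Overall GPU busy percentage");
//   desc.set_min_sampling_period_ns(1000000);  // 1 ms.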
|
|
|
|
|
|
class PERFETTO_EXPORT GpuCounterDescriptor_GpuCounterBlock : public ::protozero::CppMessageObj {
 public:
  enum FieldNumbers {
    kBlockIdFieldNumber = 1,
    kBlockCapacityFieldNumber = 2,
    kNameFieldNumber = 3,
    kDescriptionFieldNumber = 4,
    kCounterIdsFieldNumber = 5,
  };

  GpuCounterDescriptor_GpuCounterBlock();
  ~GpuCounterDescriptor_GpuCounterBlock() override;
  GpuCounterDescriptor_GpuCounterBlock(GpuCounterDescriptor_GpuCounterBlock&&) noexcept;
  GpuCounterDescriptor_GpuCounterBlock& operator=(GpuCounterDescriptor_GpuCounterBlock&&);
  GpuCounterDescriptor_GpuCounterBlock(const GpuCounterDescriptor_GpuCounterBlock&);
  GpuCounterDescriptor_GpuCounterBlock& operator=(const GpuCounterDescriptor_GpuCounterBlock&);
  bool operator==(const GpuCounterDescriptor_GpuCounterBlock&) const;
  bool operator!=(const GpuCounterDescriptor_GpuCounterBlock& other) const { return !(*this == other); }

  bool ParseFromArray(const void*, size_t) override;
  std::string SerializeAsString() const override;
  std::vector<uint8_t> SerializeAsArray() const override;
  void Serialize(::protozero::Message*) const;

  bool has_block_id() const { return _has_field_[1]; }
  uint32_t block_id() const { return block_id_; }
  void set_block_id(uint32_t value) { block_id_ = value; _has_field_.set(1); }

  bool has_block_capacity() const { return _has_field_[2]; }
  uint32_t block_capacity() const { return block_capacity_; }
  void set_block_capacity(uint32_t value) { block_capacity_ = value; _has_field_.set(2); }

  bool has_name() const { return _has_field_[3]; }
  const std::string& name() const { return name_; }
  void set_name(const std::string& value) { name_ = value; _has_field_.set(3); }

  bool has_description() const { return _has_field_[4]; }
  const std::string& description() const { return description_; }
  void set_description(const std::string& value) { description_ = value; _has_field_.set(4); }

  int counter_ids_size() const { return static_cast<int>(counter_ids_.size()); }
  const std::vector<uint32_t>& counter_ids() const { return counter_ids_; }
  std::vector<uint32_t>* mutable_counter_ids() { return &counter_ids_; }
  void clear_counter_ids() { counter_ids_.clear(); }
  void add_counter_ids(uint32_t value) { counter_ids_.emplace_back(value); }
  uint32_t* add_counter_ids() { counter_ids_.emplace_back(); return &counter_ids_.back(); }

 private:
  uint32_t block_id_{};
  uint32_t block_capacity_{};
  std::string name_{};
  std::string description_{};
  std::vector<uint32_t> counter_ids_;

  // Allows to preserve unknown protobuf fields for compatibility
  // with future versions of .proto files.
  std::string unknown_fields_;

  std::bitset<6> _has_field_{};
};

class PERFETTO_EXPORT GpuCounterDescriptor_GpuCounterSpec : public ::protozero::CppMessageObj {
 public:
  enum FieldNumbers {
    kCounterIdFieldNumber = 1,
    kNameFieldNumber = 2,
    kDescriptionFieldNumber = 3,
    kIntPeakValueFieldNumber = 5,
    kDoublePeakValueFieldNumber = 6,
    kNumeratorUnitsFieldNumber = 7,
    kDenominatorUnitsFieldNumber = 8,
    kSelectByDefaultFieldNumber = 9,
    kGroupsFieldNumber = 10,
  };

  GpuCounterDescriptor_GpuCounterSpec();
  ~GpuCounterDescriptor_GpuCounterSpec() override;
  GpuCounterDescriptor_GpuCounterSpec(GpuCounterDescriptor_GpuCounterSpec&&) noexcept;
  GpuCounterDescriptor_GpuCounterSpec& operator=(GpuCounterDescriptor_GpuCounterSpec&&);
  GpuCounterDescriptor_GpuCounterSpec(const GpuCounterDescriptor_GpuCounterSpec&);
  GpuCounterDescriptor_GpuCounterSpec& operator=(const GpuCounterDescriptor_GpuCounterSpec&);
  bool operator==(const GpuCounterDescriptor_GpuCounterSpec&) const;
  bool operator!=(const GpuCounterDescriptor_GpuCounterSpec& other) const { return !(*this == other); }

  bool ParseFromArray(const void*, size_t) override;
  std::string SerializeAsString() const override;
  std::vector<uint8_t> SerializeAsArray() const override;
  void Serialize(::protozero::Message*) const;

  bool has_counter_id() const { return _has_field_[1]; }
  uint32_t counter_id() const { return counter_id_; }
  void set_counter_id(uint32_t value) { counter_id_ = value; _has_field_.set(1); }

  bool has_name() const { return _has_field_[2]; }
  const std::string& name() const { return name_; }
  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }

  bool has_description() const { return _has_field_[3]; }
  const std::string& description() const { return description_; }
  void set_description(const std::string& value) { description_ = value; _has_field_.set(3); }

  bool has_int_peak_value() const { return _has_field_[5]; }
  int64_t int_peak_value() const { return int_peak_value_; }
  void set_int_peak_value(int64_t value) { int_peak_value_ = value; _has_field_.set(5); }

  bool has_double_peak_value() const { return _has_field_[6]; }
  double double_peak_value() const { return double_peak_value_; }
  void set_double_peak_value(double value) { double_peak_value_ = value; _has_field_.set(6); }

  int numerator_units_size() const { return static_cast<int>(numerator_units_.size()); }
  const std::vector<GpuCounterDescriptor_MeasureUnit>& numerator_units() const { return numerator_units_; }
  std::vector<GpuCounterDescriptor_MeasureUnit>* mutable_numerator_units() { return &numerator_units_; }
  void clear_numerator_units() { numerator_units_.clear(); }
  void add_numerator_units(GpuCounterDescriptor_MeasureUnit value) { numerator_units_.emplace_back(value); }
  GpuCounterDescriptor_MeasureUnit* add_numerator_units() { numerator_units_.emplace_back(); return &numerator_units_.back(); }

  int denominator_units_size() const { return static_cast<int>(denominator_units_.size()); }
  const std::vector<GpuCounterDescriptor_MeasureUnit>& denominator_units() const { return denominator_units_; }
  std::vector<GpuCounterDescriptor_MeasureUnit>* mutable_denominator_units() { return &denominator_units_; }
  void clear_denominator_units() { denominator_units_.clear(); }
  void add_denominator_units(GpuCounterDescriptor_MeasureUnit value) { denominator_units_.emplace_back(value); }
  GpuCounterDescriptor_MeasureUnit* add_denominator_units() { denominator_units_.emplace_back(); return &denominator_units_.back(); }

  bool has_select_by_default() const { return _has_field_[9]; }
  bool select_by_default() const { return select_by_default_; }
  void set_select_by_default(bool value) { select_by_default_ = value; _has_field_.set(9); }

  int groups_size() const { return static_cast<int>(groups_.size()); }
  const std::vector<GpuCounterDescriptor_GpuCounterGroup>& groups() const { return groups_; }
  std::vector<GpuCounterDescriptor_GpuCounterGroup>* mutable_groups() { return &groups_; }
  void clear_groups() { groups_.clear(); }
  void add_groups(GpuCounterDescriptor_GpuCounterGroup value) { groups_.emplace_back(value); }
  GpuCounterDescriptor_GpuCounterGroup* add_groups() { groups_.emplace_back(); return &groups_.back(); }

 private:
  uint32_t counter_id_{};
  std::string name_{};
  std::string description_{};
  int64_t int_peak_value_{};
  double double_peak_value_{};
  std::vector<GpuCounterDescriptor_MeasureUnit> numerator_units_;
  std::vector<GpuCounterDescriptor_MeasureUnit> denominator_units_;
  bool select_by_default_{};
  std::vector<GpuCounterDescriptor_GpuCounterGroup> groups_;

  // Allows to preserve unknown protobuf fields for compatibility
  // with future versions of .proto files.
  std::string unknown_fields_;

  std::bitset<11> _has_field_{};
};

}  // namespace gen
}  // namespace protos
}  // namespace perfetto

#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_GPU_COUNTER_DESCRIPTOR_PROTO_CPP_H_
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"

namespace perfetto {
namespace protos {
namespace gen {

GpuCounterDescriptor::GpuCounterDescriptor() = default;
GpuCounterDescriptor::~GpuCounterDescriptor() = default;
GpuCounterDescriptor::GpuCounterDescriptor(const GpuCounterDescriptor&) = default;
GpuCounterDescriptor& GpuCounterDescriptor::operator=(const GpuCounterDescriptor&) = default;
GpuCounterDescriptor::GpuCounterDescriptor(GpuCounterDescriptor&&) noexcept = default;
GpuCounterDescriptor& GpuCounterDescriptor::operator=(GpuCounterDescriptor&&) = default;

bool GpuCounterDescriptor::operator==(const GpuCounterDescriptor& other) const {
  return unknown_fields_ == other.unknown_fields_
   && specs_ == other.specs_
   && blocks_ == other.blocks_
   && min_sampling_period_ns_ == other.min_sampling_period_ns_
   && max_sampling_period_ns_ == other.max_sampling_period_ns_
   && supports_instrumented_sampling_ == other.supports_instrumented_sampling_;
}

bool GpuCounterDescriptor::ParseFromArray(const void* raw, size_t size) {
  specs_.clear();
  blocks_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* specs */:
        specs_.emplace_back();
        specs_.back().ParseFromString(field.as_std_string());
        break;
      case 2 /* blocks */:
        blocks_.emplace_back();
        blocks_.back().ParseFromString(field.as_std_string());
        break;
      case 3 /* min_sampling_period_ns */:
        field.get(&min_sampling_period_ns_);
        break;
      case 4 /* max_sampling_period_ns */:
        field.get(&max_sampling_period_ns_);
        break;
      case 5 /* supports_instrumented_sampling */:
        field.get(&supports_instrumented_sampling_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string GpuCounterDescriptor::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> GpuCounterDescriptor::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void GpuCounterDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: specs
  for (auto& it : specs_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 2: blocks
  for (auto& it : blocks_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  // Field 3: min_sampling_period_ns
  if (_has_field_[3]) {
    msg->AppendVarInt(3, min_sampling_period_ns_);
  }

  // Field 4: max_sampling_period_ns
  if (_has_field_[4]) {
    msg->AppendVarInt(4, max_sampling_period_ns_);
  }

  // Field 5: supports_instrumented_sampling
  if (_has_field_[5]) {
    msg->AppendTinyVarInt(5, supports_instrumented_sampling_);
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}

GpuCounterDescriptor_GpuCounterBlock::GpuCounterDescriptor_GpuCounterBlock() = default;
GpuCounterDescriptor_GpuCounterBlock::~GpuCounterDescriptor_GpuCounterBlock() = default;
GpuCounterDescriptor_GpuCounterBlock::GpuCounterDescriptor_GpuCounterBlock(const GpuCounterDescriptor_GpuCounterBlock&) = default;
GpuCounterDescriptor_GpuCounterBlock& GpuCounterDescriptor_GpuCounterBlock::operator=(const GpuCounterDescriptor_GpuCounterBlock&) = default;
GpuCounterDescriptor_GpuCounterBlock::GpuCounterDescriptor_GpuCounterBlock(GpuCounterDescriptor_GpuCounterBlock&&) noexcept = default;
GpuCounterDescriptor_GpuCounterBlock& GpuCounterDescriptor_GpuCounterBlock::operator=(GpuCounterDescriptor_GpuCounterBlock&&) = default;

bool GpuCounterDescriptor_GpuCounterBlock::operator==(const GpuCounterDescriptor_GpuCounterBlock& other) const {
  return unknown_fields_ == other.unknown_fields_
   && block_id_ == other.block_id_
   && block_capacity_ == other.block_capacity_
   && name_ == other.name_
   && description_ == other.description_
   && counter_ids_ == other.counter_ids_;
}

bool GpuCounterDescriptor_GpuCounterBlock::ParseFromArray(const void* raw, size_t size) {
  counter_ids_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* block_id */:
        field.get(&block_id_);
        break;
      case 2 /* block_capacity */:
        field.get(&block_capacity_);
        break;
      case 3 /* name */:
        field.get(&name_);
        break;
      case 4 /* description */:
        field.get(&description_);
        break;
      case 5 /* counter_ids */:
        counter_ids_.emplace_back();
        field.get(&counter_ids_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string GpuCounterDescriptor_GpuCounterBlock::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> GpuCounterDescriptor_GpuCounterBlock::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void GpuCounterDescriptor_GpuCounterBlock::Serialize(::protozero::Message* msg) const {
  // Field 1: block_id
  if (_has_field_[1]) {
    msg->AppendVarInt(1, block_id_);
  }

  // Field 2: block_capacity
  if (_has_field_[2]) {
    msg->AppendVarInt(2, block_capacity_);
  }

  // Field 3: name
  if (_has_field_[3]) {
    msg->AppendString(3, name_);
  }

  // Field 4: description
  if (_has_field_[4]) {
    msg->AppendString(4, description_);
  }

  // Field 5: counter_ids
  for (auto& it : counter_ids_) {
    msg->AppendVarInt(5, it);
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}

GpuCounterDescriptor_GpuCounterSpec::GpuCounterDescriptor_GpuCounterSpec() = default;
GpuCounterDescriptor_GpuCounterSpec::~GpuCounterDescriptor_GpuCounterSpec() = default;
GpuCounterDescriptor_GpuCounterSpec::GpuCounterDescriptor_GpuCounterSpec(const GpuCounterDescriptor_GpuCounterSpec&) = default;
GpuCounterDescriptor_GpuCounterSpec& GpuCounterDescriptor_GpuCounterSpec::operator=(const GpuCounterDescriptor_GpuCounterSpec&) = default;
GpuCounterDescriptor_GpuCounterSpec::GpuCounterDescriptor_GpuCounterSpec(GpuCounterDescriptor_GpuCounterSpec&&) noexcept = default;
GpuCounterDescriptor_GpuCounterSpec& GpuCounterDescriptor_GpuCounterSpec::operator=(GpuCounterDescriptor_GpuCounterSpec&&) = default;

bool GpuCounterDescriptor_GpuCounterSpec::operator==(const GpuCounterDescriptor_GpuCounterSpec& other) const {
  return unknown_fields_ == other.unknown_fields_
   && counter_id_ == other.counter_id_
   && name_ == other.name_
   && description_ == other.description_
   && int_peak_value_ == other.int_peak_value_
   && double_peak_value_ == other.double_peak_value_
   && numerator_units_ == other.numerator_units_
   && denominator_units_ == other.denominator_units_
   && select_by_default_ == other.select_by_default_
   && groups_ == other.groups_;
}

bool GpuCounterDescriptor_GpuCounterSpec::ParseFromArray(const void* raw, size_t size) {
  numerator_units_.clear();
  denominator_units_.clear();
  groups_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* counter_id */:
        field.get(&counter_id_);
        break;
      case 2 /* name */:
        field.get(&name_);
        break;
      case 3 /* description */:
        field.get(&description_);
        break;
      case 5 /* int_peak_value */:
        field.get(&int_peak_value_);
        break;
      case 6 /* double_peak_value */:
        field.get(&double_peak_value_);
        break;
      case 7 /* numerator_units */:
        numerator_units_.emplace_back();
        field.get(&numerator_units_.back());
        break;
      case 8 /* denominator_units */:
        denominator_units_.emplace_back();
        field.get(&denominator_units_.back());
        break;
      case 9 /* select_by_default */:
        field.get(&select_by_default_);
        break;
      case 10 /* groups */:
        groups_.emplace_back();
        field.get(&groups_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string GpuCounterDescriptor_GpuCounterSpec::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> GpuCounterDescriptor_GpuCounterSpec::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void GpuCounterDescriptor_GpuCounterSpec::Serialize(::protozero::Message* msg) const {
  // Field 1: counter_id
  if (_has_field_[1]) {
    msg->AppendVarInt(1, counter_id_);
  }

  // Field 2: name
  if (_has_field_[2]) {
    msg->AppendString(2, name_);
  }

  // Field 3: description
  if (_has_field_[3]) {
    msg->AppendString(3, description_);
  }

  // Field 5: int_peak_value
  if (_has_field_[5]) {
    msg->AppendVarInt(5, int_peak_value_);
  }

  // Field 6: double_peak_value
  if (_has_field_[6]) {
    msg->AppendFixed(6, double_peak_value_);
  }

  // Field 7: numerator_units
  for (auto& it : numerator_units_) {
    msg->AppendVarInt(7, it);
  }

  // Field 8: denominator_units
  for (auto& it : denominator_units_) {
    msg->AppendVarInt(8, it);
  }

  // Field 9: select_by_default
  if (_has_field_[9]) {
    msg->AppendTinyVarInt(9, select_by_default_);
  }

  // Field 10: groups
  for (auto& it : groups_) {
    msg->AppendVarInt(10, it);
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
#pragma GCC diagnostic pop
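// Illustrative usage sketch (editorial addition, not cppgen_plugin output).
// It shows how the GpuCounterDescriptor message object defined above is
// typically populated, serialized and parsed back. The counter name and the
// numeric values are invented for the example, and the snippet is kept inside
// a comment so it cannot affect the amalgamated build:
//
//   perfetto::protos::gen::GpuCounterDescriptor desc;
//   auto* spec = desc.add_specs();
//   spec->set_counter_id(1);
//   spec->set_name("gpu_utilization");
//   desc.set_min_sampling_period_ns(1000000);  // Marks _has_field_[3].
//
//   std::vector<uint8_t> wire = desc.SerializeAsArray();
//   perfetto::protos::gen::GpuCounterDescriptor decoded;
//   bool ok = decoded.ParseFromArray(wire.data(), wire.size());
//   // For a well-formed round trip, ok is true and decoded == desc.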
// gen_amalgamated begin source: gen/protos/perfetto/common/observable_events.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/observable_events.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_OBSERVABLE_EVENTS_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_OBSERVABLE_EVENTS_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ObservableEvents;
|
|
class ObservableEvents_DataSourceInstanceStateChange;
|
|
enum ObservableEvents_Type : int;
|
|
enum ObservableEvents_DataSourceInstanceState : int;
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum ObservableEvents_Type : int {
|
|
ObservableEvents_Type_TYPE_UNSPECIFIED = 0,
|
|
ObservableEvents_Type_TYPE_DATA_SOURCES_INSTANCES = 1,
|
|
ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED = 2,
|
|
};
|
|
enum ObservableEvents_DataSourceInstanceState : int {
|
|
ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED = 1,
|
|
ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED = 2,
|
|
};
|
|
|
|
class PERFETTO_EXPORT ObservableEvents : public ::protozero::CppMessageObj {
|
|
public:
|
|
using DataSourceInstanceStateChange = ObservableEvents_DataSourceInstanceStateChange;
|
|
using Type = ObservableEvents_Type;
|
|
static constexpr auto TYPE_UNSPECIFIED = ObservableEvents_Type_TYPE_UNSPECIFIED;
|
|
static constexpr auto TYPE_DATA_SOURCES_INSTANCES = ObservableEvents_Type_TYPE_DATA_SOURCES_INSTANCES;
|
|
static constexpr auto TYPE_ALL_DATA_SOURCES_STARTED = ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED;
|
|
static constexpr auto Type_MIN = ObservableEvents_Type_TYPE_UNSPECIFIED;
|
|
static constexpr auto Type_MAX = ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED;
|
|
using DataSourceInstanceState = ObservableEvents_DataSourceInstanceState;
|
|
static constexpr auto DATA_SOURCE_INSTANCE_STATE_STOPPED = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED;
|
|
static constexpr auto DATA_SOURCE_INSTANCE_STATE_STARTED = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED;
|
|
static constexpr auto DataSourceInstanceState_MIN = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED;
|
|
static constexpr auto DataSourceInstanceState_MAX = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED;
|
|
enum FieldNumbers {
|
|
kInstanceStateChangesFieldNumber = 1,
|
|
kAllDataSourcesStartedFieldNumber = 2,
|
|
};
|
|
|
|
ObservableEvents();
|
|
~ObservableEvents() override;
|
|
ObservableEvents(ObservableEvents&&) noexcept;
|
|
ObservableEvents& operator=(ObservableEvents&&);
|
|
ObservableEvents(const ObservableEvents&);
|
|
ObservableEvents& operator=(const ObservableEvents&);
|
|
bool operator==(const ObservableEvents&) const;
|
|
bool operator!=(const ObservableEvents& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int instance_state_changes_size() const { return static_cast<int>(instance_state_changes_.size()); }
|
|
const std::vector<ObservableEvents_DataSourceInstanceStateChange>& instance_state_changes() const { return instance_state_changes_; }
|
|
std::vector<ObservableEvents_DataSourceInstanceStateChange>* mutable_instance_state_changes() { return &instance_state_changes_; }
|
|
void clear_instance_state_changes() { instance_state_changes_.clear(); }
|
|
ObservableEvents_DataSourceInstanceStateChange* add_instance_state_changes() { instance_state_changes_.emplace_back(); return &instance_state_changes_.back(); }
|
|
|
|
bool has_all_data_sources_started() const { return _has_field_[2]; }
|
|
bool all_data_sources_started() const { return all_data_sources_started_; }
|
|
void set_all_data_sources_started(bool value) { all_data_sources_started_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
std::vector<ObservableEvents_DataSourceInstanceStateChange> instance_state_changes_;
|
|
bool all_data_sources_started_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ObservableEvents_DataSourceInstanceStateChange : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kProducerNameFieldNumber = 1,
|
|
kDataSourceNameFieldNumber = 2,
|
|
kStateFieldNumber = 3,
|
|
};
|
|
|
|
ObservableEvents_DataSourceInstanceStateChange();
|
|
~ObservableEvents_DataSourceInstanceStateChange() override;
|
|
ObservableEvents_DataSourceInstanceStateChange(ObservableEvents_DataSourceInstanceStateChange&&) noexcept;
|
|
ObservableEvents_DataSourceInstanceStateChange& operator=(ObservableEvents_DataSourceInstanceStateChange&&);
|
|
ObservableEvents_DataSourceInstanceStateChange(const ObservableEvents_DataSourceInstanceStateChange&);
|
|
ObservableEvents_DataSourceInstanceStateChange& operator=(const ObservableEvents_DataSourceInstanceStateChange&);
|
|
bool operator==(const ObservableEvents_DataSourceInstanceStateChange&) const;
|
|
bool operator!=(const ObservableEvents_DataSourceInstanceStateChange& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_producer_name() const { return _has_field_[1]; }
|
|
const std::string& producer_name() const { return producer_name_; }
|
|
void set_producer_name(const std::string& value) { producer_name_ = value; _has_field_.set(1); }
|
|
|
|
bool has_data_source_name() const { return _has_field_[2]; }
|
|
const std::string& data_source_name() const { return data_source_name_; }
|
|
void set_data_source_name(const std::string& value) { data_source_name_ = value; _has_field_.set(2); }
|
|
|
|
bool has_state() const { return _has_field_[3]; }
|
|
ObservableEvents_DataSourceInstanceState state() const { return state_; }
|
|
void set_state(ObservableEvents_DataSourceInstanceState value) { state_ = value; _has_field_.set(3); }
|
|
|
|
private:
|
|
std::string producer_name_{};
|
|
std::string data_source_name_{};
|
|
ObservableEvents_DataSourceInstanceState state_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<4> _has_field_{};
|
|
};
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_OBSERVABLE_EVENTS_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ObservableEvents::ObservableEvents() = default;
|
|
ObservableEvents::~ObservableEvents() = default;
|
|
ObservableEvents::ObservableEvents(const ObservableEvents&) = default;
|
|
ObservableEvents& ObservableEvents::operator=(const ObservableEvents&) = default;
|
|
ObservableEvents::ObservableEvents(ObservableEvents&&) noexcept = default;
|
|
ObservableEvents& ObservableEvents::operator=(ObservableEvents&&) = default;
|
|
|
|
bool ObservableEvents::operator==(const ObservableEvents& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& instance_state_changes_ == other.instance_state_changes_
|
|
&& all_data_sources_started_ == other.all_data_sources_started_;
|
|
}
|
|
|
|
bool ObservableEvents::ParseFromArray(const void* raw, size_t size) {
|
|
instance_state_changes_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* instance_state_changes */:
|
|
instance_state_changes_.emplace_back();
|
|
instance_state_changes_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 2 /* all_data_sources_started */:
|
|
field.get(&all_data_sources_started_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ObservableEvents::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ObservableEvents::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ObservableEvents::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: instance_state_changes
|
|
for (auto& it : instance_state_changes_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: all_data_sources_started
|
|
if (_has_field_[2]) {
|
|
msg->AppendTinyVarInt(2, all_data_sources_started_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ObservableEvents_DataSourceInstanceStateChange::ObservableEvents_DataSourceInstanceStateChange() = default;
|
|
ObservableEvents_DataSourceInstanceStateChange::~ObservableEvents_DataSourceInstanceStateChange() = default;
|
|
ObservableEvents_DataSourceInstanceStateChange::ObservableEvents_DataSourceInstanceStateChange(const ObservableEvents_DataSourceInstanceStateChange&) = default;
|
|
ObservableEvents_DataSourceInstanceStateChange& ObservableEvents_DataSourceInstanceStateChange::operator=(const ObservableEvents_DataSourceInstanceStateChange&) = default;
|
|
ObservableEvents_DataSourceInstanceStateChange::ObservableEvents_DataSourceInstanceStateChange(ObservableEvents_DataSourceInstanceStateChange&&) noexcept = default;
|
|
ObservableEvents_DataSourceInstanceStateChange& ObservableEvents_DataSourceInstanceStateChange::operator=(ObservableEvents_DataSourceInstanceStateChange&&) = default;
|
|
|
|
bool ObservableEvents_DataSourceInstanceStateChange::operator==(const ObservableEvents_DataSourceInstanceStateChange& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& producer_name_ == other.producer_name_
|
|
&& data_source_name_ == other.data_source_name_
|
|
&& state_ == other.state_;
|
|
}
|
|
|
|
bool ObservableEvents_DataSourceInstanceStateChange::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* producer_name */:
|
|
field.get(&producer_name_);
|
|
break;
|
|
case 2 /* data_source_name */:
|
|
field.get(&data_source_name_);
|
|
break;
|
|
case 3 /* state */:
|
|
field.get(&state_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ObservableEvents_DataSourceInstanceStateChange::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ObservableEvents_DataSourceInstanceStateChange::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ObservableEvents_DataSourceInstanceStateChange::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: producer_name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, producer_name_);
|
|
}
|
|
|
|
// Field 2: data_source_name
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, data_source_name_);
|
|
}
|
|
|
|
// Field 3: state
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, state_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#pragma GCC diagnostic pop
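// Illustrative usage sketch (editorial addition, not cppgen_plugin output).
// The ObservableEvents classes above are typically filled on the tracing
// service side when a consumer observes data-source state changes. Note that
// ParseFromArray() routes unrecognised field ids into unknown_fields_ and
// Serialize() re-emits them, so payloads from newer .proto revisions survive
// a decode/re-encode. The names below are invented and the snippet is kept
// inside a comment so the amalgamated build is unchanged:
//
//   perfetto::protos::gen::ObservableEvents evt;
//   auto* change = evt.add_instance_state_changes();
//   change->set_producer_name("com.example.producer");
//   change->set_data_source_name("track_event");
//   change->set_state(perfetto::protos::gen::ObservableEvents::
//                         DATA_SOURCE_INSTANCE_STATE_STARTED);
//   std::string wire = evt.SerializeAsString();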
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/sys_stats_counters.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/sys_stats_counters.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_SYS_STATS_COUNTERS_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_SYS_STATS_COUNTERS_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum MeminfoCounters : int;
|
|
enum VmstatCounters : int;
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum MeminfoCounters : int {
|
|
MEMINFO_UNSPECIFIED = 0,
|
|
MEMINFO_MEM_TOTAL = 1,
|
|
MEMINFO_MEM_FREE = 2,
|
|
MEMINFO_MEM_AVAILABLE = 3,
|
|
MEMINFO_BUFFERS = 4,
|
|
MEMINFO_CACHED = 5,
|
|
MEMINFO_SWAP_CACHED = 6,
|
|
MEMINFO_ACTIVE = 7,
|
|
MEMINFO_INACTIVE = 8,
|
|
MEMINFO_ACTIVE_ANON = 9,
|
|
MEMINFO_INACTIVE_ANON = 10,
|
|
MEMINFO_ACTIVE_FILE = 11,
|
|
MEMINFO_INACTIVE_FILE = 12,
|
|
MEMINFO_UNEVICTABLE = 13,
|
|
MEMINFO_MLOCKED = 14,
|
|
MEMINFO_SWAP_TOTAL = 15,
|
|
MEMINFO_SWAP_FREE = 16,
|
|
MEMINFO_DIRTY = 17,
|
|
MEMINFO_WRITEBACK = 18,
|
|
MEMINFO_ANON_PAGES = 19,
|
|
MEMINFO_MAPPED = 20,
|
|
MEMINFO_SHMEM = 21,
|
|
MEMINFO_SLAB = 22,
|
|
MEMINFO_SLAB_RECLAIMABLE = 23,
|
|
MEMINFO_SLAB_UNRECLAIMABLE = 24,
|
|
MEMINFO_KERNEL_STACK = 25,
|
|
MEMINFO_PAGE_TABLES = 26,
|
|
MEMINFO_COMMIT_LIMIT = 27,
|
|
MEMINFO_COMMITED_AS = 28,
|
|
MEMINFO_VMALLOC_TOTAL = 29,
|
|
MEMINFO_VMALLOC_USED = 30,
|
|
MEMINFO_VMALLOC_CHUNK = 31,
|
|
MEMINFO_CMA_TOTAL = 32,
|
|
MEMINFO_CMA_FREE = 33,
|
|
};
|
|
enum VmstatCounters : int {
|
|
VMSTAT_UNSPECIFIED = 0,
|
|
VMSTAT_NR_FREE_PAGES = 1,
|
|
VMSTAT_NR_ALLOC_BATCH = 2,
|
|
VMSTAT_NR_INACTIVE_ANON = 3,
|
|
VMSTAT_NR_ACTIVE_ANON = 4,
|
|
VMSTAT_NR_INACTIVE_FILE = 5,
|
|
VMSTAT_NR_ACTIVE_FILE = 6,
|
|
VMSTAT_NR_UNEVICTABLE = 7,
|
|
VMSTAT_NR_MLOCK = 8,
|
|
VMSTAT_NR_ANON_PAGES = 9,
|
|
VMSTAT_NR_MAPPED = 10,
|
|
VMSTAT_NR_FILE_PAGES = 11,
|
|
VMSTAT_NR_DIRTY = 12,
|
|
VMSTAT_NR_WRITEBACK = 13,
|
|
VMSTAT_NR_SLAB_RECLAIMABLE = 14,
|
|
VMSTAT_NR_SLAB_UNRECLAIMABLE = 15,
|
|
VMSTAT_NR_PAGE_TABLE_PAGES = 16,
|
|
VMSTAT_NR_KERNEL_STACK = 17,
|
|
VMSTAT_NR_OVERHEAD = 18,
|
|
VMSTAT_NR_UNSTABLE = 19,
|
|
VMSTAT_NR_BOUNCE = 20,
|
|
VMSTAT_NR_VMSCAN_WRITE = 21,
|
|
VMSTAT_NR_VMSCAN_IMMEDIATE_RECLAIM = 22,
|
|
VMSTAT_NR_WRITEBACK_TEMP = 23,
|
|
VMSTAT_NR_ISOLATED_ANON = 24,
|
|
VMSTAT_NR_ISOLATED_FILE = 25,
|
|
VMSTAT_NR_SHMEM = 26,
|
|
VMSTAT_NR_DIRTIED = 27,
|
|
VMSTAT_NR_WRITTEN = 28,
|
|
VMSTAT_NR_PAGES_SCANNED = 29,
|
|
VMSTAT_WORKINGSET_REFAULT = 30,
|
|
VMSTAT_WORKINGSET_ACTIVATE = 31,
|
|
VMSTAT_WORKINGSET_NODERECLAIM = 32,
|
|
VMSTAT_NR_ANON_TRANSPARENT_HUGEPAGES = 33,
|
|
VMSTAT_NR_FREE_CMA = 34,
|
|
VMSTAT_NR_SWAPCACHE = 35,
|
|
VMSTAT_NR_DIRTY_THRESHOLD = 36,
|
|
VMSTAT_NR_DIRTY_BACKGROUND_THRESHOLD = 37,
|
|
VMSTAT_PGPGIN = 38,
|
|
VMSTAT_PGPGOUT = 39,
|
|
VMSTAT_PGPGOUTCLEAN = 40,
|
|
VMSTAT_PSWPIN = 41,
|
|
VMSTAT_PSWPOUT = 42,
|
|
VMSTAT_PGALLOC_DMA = 43,
|
|
VMSTAT_PGALLOC_NORMAL = 44,
|
|
VMSTAT_PGALLOC_MOVABLE = 45,
|
|
VMSTAT_PGFREE = 46,
|
|
VMSTAT_PGACTIVATE = 47,
|
|
VMSTAT_PGDEACTIVATE = 48,
|
|
VMSTAT_PGFAULT = 49,
|
|
VMSTAT_PGMAJFAULT = 50,
|
|
VMSTAT_PGREFILL_DMA = 51,
|
|
VMSTAT_PGREFILL_NORMAL = 52,
|
|
VMSTAT_PGREFILL_MOVABLE = 53,
|
|
VMSTAT_PGSTEAL_KSWAPD_DMA = 54,
|
|
VMSTAT_PGSTEAL_KSWAPD_NORMAL = 55,
|
|
VMSTAT_PGSTEAL_KSWAPD_MOVABLE = 56,
|
|
VMSTAT_PGSTEAL_DIRECT_DMA = 57,
|
|
VMSTAT_PGSTEAL_DIRECT_NORMAL = 58,
|
|
VMSTAT_PGSTEAL_DIRECT_MOVABLE = 59,
|
|
VMSTAT_PGSCAN_KSWAPD_DMA = 60,
|
|
VMSTAT_PGSCAN_KSWAPD_NORMAL = 61,
|
|
VMSTAT_PGSCAN_KSWAPD_MOVABLE = 62,
|
|
VMSTAT_PGSCAN_DIRECT_DMA = 63,
|
|
VMSTAT_PGSCAN_DIRECT_NORMAL = 64,
|
|
VMSTAT_PGSCAN_DIRECT_MOVABLE = 65,
|
|
VMSTAT_PGSCAN_DIRECT_THROTTLE = 66,
|
|
VMSTAT_PGINODESTEAL = 67,
|
|
VMSTAT_SLABS_SCANNED = 68,
|
|
VMSTAT_KSWAPD_INODESTEAL = 69,
|
|
VMSTAT_KSWAPD_LOW_WMARK_HIT_QUICKLY = 70,
|
|
VMSTAT_KSWAPD_HIGH_WMARK_HIT_QUICKLY = 71,
|
|
VMSTAT_PAGEOUTRUN = 72,
|
|
VMSTAT_ALLOCSTALL = 73,
|
|
VMSTAT_PGROTATED = 74,
|
|
VMSTAT_DROP_PAGECACHE = 75,
|
|
VMSTAT_DROP_SLAB = 76,
|
|
VMSTAT_PGMIGRATE_SUCCESS = 77,
|
|
VMSTAT_PGMIGRATE_FAIL = 78,
|
|
VMSTAT_COMPACT_MIGRATE_SCANNED = 79,
|
|
VMSTAT_COMPACT_FREE_SCANNED = 80,
|
|
VMSTAT_COMPACT_ISOLATED = 81,
|
|
VMSTAT_COMPACT_STALL = 82,
|
|
VMSTAT_COMPACT_FAIL = 83,
|
|
VMSTAT_COMPACT_SUCCESS = 84,
|
|
VMSTAT_COMPACT_DAEMON_WAKE = 85,
|
|
VMSTAT_UNEVICTABLE_PGS_CULLED = 86,
|
|
VMSTAT_UNEVICTABLE_PGS_SCANNED = 87,
|
|
VMSTAT_UNEVICTABLE_PGS_RESCUED = 88,
|
|
VMSTAT_UNEVICTABLE_PGS_MLOCKED = 89,
|
|
VMSTAT_UNEVICTABLE_PGS_MUNLOCKED = 90,
|
|
VMSTAT_UNEVICTABLE_PGS_CLEARED = 91,
|
|
VMSTAT_UNEVICTABLE_PGS_STRANDED = 92,
|
|
VMSTAT_NR_ZSPAGES = 93,
|
|
VMSTAT_NR_ION_HEAP = 94,
|
|
VMSTAT_NR_GPU_HEAP = 95,
|
|
};
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_SYS_STATS_COUNTERS_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#pragma GCC diagnostic pop
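// Illustrative note (editorial addition): MeminfoCounters and VmstatCounters
// above are plain enum ids whose names mirror the keys of /proc/meminfo and
// /proc/vmstat. A producer-side lookup table along the following lines is a
// typical way to translate a textual key into its enum id before emitting a
// counter value; the two-entry map is a hypothetical excerpt, kept inside a
// comment so it does not affect the amalgamated build:
//
//   #include <map>
//   #include <string>
//   static const std::map<std::string, perfetto::protos::gen::MeminfoCounters>
//       kMeminfoKeys = {
//           {"MemTotal", perfetto::protos::gen::MEMINFO_MEM_TOTAL},
//           {"MemFree", perfetto::protos::gen::MEMINFO_MEM_FREE},
//       };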
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/trace_stats.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/trace_stats.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class TraceStats;
|
|
class TraceStats_BufferStats;
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT TraceStats : public ::protozero::CppMessageObj {
|
|
public:
|
|
using BufferStats = TraceStats_BufferStats;
|
|
enum FieldNumbers {
|
|
kBufferStatsFieldNumber = 1,
|
|
kProducersConnectedFieldNumber = 2,
|
|
kProducersSeenFieldNumber = 3,
|
|
kDataSourcesRegisteredFieldNumber = 4,
|
|
kDataSourcesSeenFieldNumber = 5,
|
|
kTracingSessionsFieldNumber = 6,
|
|
kTotalBuffersFieldNumber = 7,
|
|
kChunksDiscardedFieldNumber = 8,
|
|
kPatchesDiscardedFieldNumber = 9,
|
|
kInvalidPacketsFieldNumber = 10,
|
|
};
|
|
|
|
TraceStats();
|
|
~TraceStats() override;
|
|
TraceStats(TraceStats&&) noexcept;
|
|
TraceStats& operator=(TraceStats&&);
|
|
TraceStats(const TraceStats&);
|
|
TraceStats& operator=(const TraceStats&);
|
|
bool operator==(const TraceStats&) const;
|
|
bool operator!=(const TraceStats& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int buffer_stats_size() const { return static_cast<int>(buffer_stats_.size()); }
|
|
const std::vector<TraceStats_BufferStats>& buffer_stats() const { return buffer_stats_; }
|
|
std::vector<TraceStats_BufferStats>* mutable_buffer_stats() { return &buffer_stats_; }
|
|
void clear_buffer_stats() { buffer_stats_.clear(); }
|
|
TraceStats_BufferStats* add_buffer_stats() { buffer_stats_.emplace_back(); return &buffer_stats_.back(); }
|
|
|
|
bool has_producers_connected() const { return _has_field_[2]; }
|
|
uint32_t producers_connected() const { return producers_connected_; }
|
|
void set_producers_connected(uint32_t value) { producers_connected_ = value; _has_field_.set(2); }
|
|
|
|
bool has_producers_seen() const { return _has_field_[3]; }
|
|
uint64_t producers_seen() const { return producers_seen_; }
|
|
void set_producers_seen(uint64_t value) { producers_seen_ = value; _has_field_.set(3); }
|
|
|
|
bool has_data_sources_registered() const { return _has_field_[4]; }
|
|
uint32_t data_sources_registered() const { return data_sources_registered_; }
|
|
void set_data_sources_registered(uint32_t value) { data_sources_registered_ = value; _has_field_.set(4); }
|
|
|
|
bool has_data_sources_seen() const { return _has_field_[5]; }
|
|
uint64_t data_sources_seen() const { return data_sources_seen_; }
|
|
void set_data_sources_seen(uint64_t value) { data_sources_seen_ = value; _has_field_.set(5); }
|
|
|
|
bool has_tracing_sessions() const { return _has_field_[6]; }
|
|
uint32_t tracing_sessions() const { return tracing_sessions_; }
|
|
void set_tracing_sessions(uint32_t value) { tracing_sessions_ = value; _has_field_.set(6); }
|
|
|
|
bool has_total_buffers() const { return _has_field_[7]; }
|
|
uint32_t total_buffers() const { return total_buffers_; }
|
|
void set_total_buffers(uint32_t value) { total_buffers_ = value; _has_field_.set(7); }
|
|
|
|
bool has_chunks_discarded() const { return _has_field_[8]; }
|
|
uint64_t chunks_discarded() const { return chunks_discarded_; }
|
|
void set_chunks_discarded(uint64_t value) { chunks_discarded_ = value; _has_field_.set(8); }
|
|
|
|
bool has_patches_discarded() const { return _has_field_[9]; }
|
|
uint64_t patches_discarded() const { return patches_discarded_; }
|
|
void set_patches_discarded(uint64_t value) { patches_discarded_ = value; _has_field_.set(9); }
|
|
|
|
bool has_invalid_packets() const { return _has_field_[10]; }
|
|
uint64_t invalid_packets() const { return invalid_packets_; }
|
|
void set_invalid_packets(uint64_t value) { invalid_packets_ = value; _has_field_.set(10); }
|
|
|
|
private:
|
|
std::vector<TraceStats_BufferStats> buffer_stats_;
|
|
uint32_t producers_connected_{};
|
|
uint64_t producers_seen_{};
|
|
uint32_t data_sources_registered_{};
|
|
uint64_t data_sources_seen_{};
|
|
uint32_t tracing_sessions_{};
|
|
uint32_t total_buffers_{};
|
|
uint64_t chunks_discarded_{};
|
|
uint64_t patches_discarded_{};
|
|
uint64_t invalid_packets_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<11> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT TraceStats_BufferStats : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kBufferSizeFieldNumber = 12,
|
|
kBytesWrittenFieldNumber = 1,
|
|
kBytesOverwrittenFieldNumber = 13,
|
|
kBytesReadFieldNumber = 14,
|
|
kPaddingBytesWrittenFieldNumber = 15,
|
|
kPaddingBytesClearedFieldNumber = 16,
|
|
kChunksWrittenFieldNumber = 2,
|
|
kChunksRewrittenFieldNumber = 10,
|
|
kChunksOverwrittenFieldNumber = 3,
|
|
kChunksDiscardedFieldNumber = 18,
|
|
kChunksReadFieldNumber = 17,
|
|
kChunksCommittedOutOfOrderFieldNumber = 11,
|
|
kWriteWrapCountFieldNumber = 4,
|
|
kPatchesSucceededFieldNumber = 5,
|
|
kPatchesFailedFieldNumber = 6,
|
|
kReadaheadsSucceededFieldNumber = 7,
|
|
kReadaheadsFailedFieldNumber = 8,
|
|
kAbiViolationsFieldNumber = 9,
|
|
kTraceWriterPacketLossFieldNumber = 19,
|
|
};
|
|
|
|
TraceStats_BufferStats();
|
|
~TraceStats_BufferStats() override;
|
|
TraceStats_BufferStats(TraceStats_BufferStats&&) noexcept;
|
|
TraceStats_BufferStats& operator=(TraceStats_BufferStats&&);
|
|
TraceStats_BufferStats(const TraceStats_BufferStats&);
|
|
TraceStats_BufferStats& operator=(const TraceStats_BufferStats&);
|
|
bool operator==(const TraceStats_BufferStats&) const;
|
|
bool operator!=(const TraceStats_BufferStats& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_buffer_size() const { return _has_field_[12]; }
|
|
uint64_t buffer_size() const { return buffer_size_; }
|
|
void set_buffer_size(uint64_t value) { buffer_size_ = value; _has_field_.set(12); }
|
|
|
|
bool has_bytes_written() const { return _has_field_[1]; }
|
|
uint64_t bytes_written() const { return bytes_written_; }
|
|
void set_bytes_written(uint64_t value) { bytes_written_ = value; _has_field_.set(1); }
|
|
|
|
bool has_bytes_overwritten() const { return _has_field_[13]; }
|
|
uint64_t bytes_overwritten() const { return bytes_overwritten_; }
|
|
void set_bytes_overwritten(uint64_t value) { bytes_overwritten_ = value; _has_field_.set(13); }
|
|
|
|
bool has_bytes_read() const { return _has_field_[14]; }
|
|
uint64_t bytes_read() const { return bytes_read_; }
|
|
void set_bytes_read(uint64_t value) { bytes_read_ = value; _has_field_.set(14); }
|
|
|
|
bool has_padding_bytes_written() const { return _has_field_[15]; }
|
|
uint64_t padding_bytes_written() const { return padding_bytes_written_; }
|
|
void set_padding_bytes_written(uint64_t value) { padding_bytes_written_ = value; _has_field_.set(15); }
|
|
|
|
bool has_padding_bytes_cleared() const { return _has_field_[16]; }
|
|
uint64_t padding_bytes_cleared() const { return padding_bytes_cleared_; }
|
|
void set_padding_bytes_cleared(uint64_t value) { padding_bytes_cleared_ = value; _has_field_.set(16); }
|
|
|
|
bool has_chunks_written() const { return _has_field_[2]; }
|
|
uint64_t chunks_written() const { return chunks_written_; }
|
|
void set_chunks_written(uint64_t value) { chunks_written_ = value; _has_field_.set(2); }
|
|
|
|
bool has_chunks_rewritten() const { return _has_field_[10]; }
|
|
uint64_t chunks_rewritten() const { return chunks_rewritten_; }
|
|
void set_chunks_rewritten(uint64_t value) { chunks_rewritten_ = value; _has_field_.set(10); }
|
|
|
|
bool has_chunks_overwritten() const { return _has_field_[3]; }
|
|
uint64_t chunks_overwritten() const { return chunks_overwritten_; }
|
|
void set_chunks_overwritten(uint64_t value) { chunks_overwritten_ = value; _has_field_.set(3); }
|
|
|
|
bool has_chunks_discarded() const { return _has_field_[18]; }
|
|
uint64_t chunks_discarded() const { return chunks_discarded_; }
|
|
void set_chunks_discarded(uint64_t value) { chunks_discarded_ = value; _has_field_.set(18); }
|
|
|
|
bool has_chunks_read() const { return _has_field_[17]; }
|
|
uint64_t chunks_read() const { return chunks_read_; }
|
|
void set_chunks_read(uint64_t value) { chunks_read_ = value; _has_field_.set(17); }
|
|
|
|
bool has_chunks_committed_out_of_order() const { return _has_field_[11]; }
|
|
uint64_t chunks_committed_out_of_order() const { return chunks_committed_out_of_order_; }
|
|
void set_chunks_committed_out_of_order(uint64_t value) { chunks_committed_out_of_order_ = value; _has_field_.set(11); }
|
|
|
|
bool has_write_wrap_count() const { return _has_field_[4]; }
|
|
uint64_t write_wrap_count() const { return write_wrap_count_; }
|
|
void set_write_wrap_count(uint64_t value) { write_wrap_count_ = value; _has_field_.set(4); }
|
|
|
|
bool has_patches_succeeded() const { return _has_field_[5]; }
|
|
uint64_t patches_succeeded() const { return patches_succeeded_; }
|
|
void set_patches_succeeded(uint64_t value) { patches_succeeded_ = value; _has_field_.set(5); }
|
|
|
|
bool has_patches_failed() const { return _has_field_[6]; }
|
|
uint64_t patches_failed() const { return patches_failed_; }
|
|
void set_patches_failed(uint64_t value) { patches_failed_ = value; _has_field_.set(6); }
|
|
|
|
bool has_readaheads_succeeded() const { return _has_field_[7]; }
|
|
uint64_t readaheads_succeeded() const { return readaheads_succeeded_; }
|
|
void set_readaheads_succeeded(uint64_t value) { readaheads_succeeded_ = value; _has_field_.set(7); }
|
|
|
|
bool has_readaheads_failed() const { return _has_field_[8]; }
|
|
uint64_t readaheads_failed() const { return readaheads_failed_; }
|
|
void set_readaheads_failed(uint64_t value) { readaheads_failed_ = value; _has_field_.set(8); }
|
|
|
|
bool has_abi_violations() const { return _has_field_[9]; }
|
|
uint64_t abi_violations() const { return abi_violations_; }
|
|
void set_abi_violations(uint64_t value) { abi_violations_ = value; _has_field_.set(9); }
|
|
|
|
bool has_trace_writer_packet_loss() const { return _has_field_[19]; }
|
|
uint64_t trace_writer_packet_loss() const { return trace_writer_packet_loss_; }
|
|
void set_trace_writer_packet_loss(uint64_t value) { trace_writer_packet_loss_ = value; _has_field_.set(19); }
|
|
|
|
private:
|
|
uint64_t buffer_size_{};
|
|
uint64_t bytes_written_{};
|
|
uint64_t bytes_overwritten_{};
|
|
uint64_t bytes_read_{};
|
|
uint64_t padding_bytes_written_{};
|
|
uint64_t padding_bytes_cleared_{};
|
|
uint64_t chunks_written_{};
|
|
uint64_t chunks_rewritten_{};
|
|
uint64_t chunks_overwritten_{};
|
|
uint64_t chunks_discarded_{};
|
|
uint64_t chunks_read_{};
|
|
uint64_t chunks_committed_out_of_order_{};
|
|
uint64_t write_wrap_count_{};
|
|
uint64_t patches_succeeded_{};
|
|
uint64_t patches_failed_{};
|
|
uint64_t readaheads_succeeded_{};
|
|
uint64_t readaheads_failed_{};
|
|
uint64_t abi_violations_{};
|
|
uint64_t trace_writer_packet_loss_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<20> _has_field_{};
|
|
};
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
TraceStats::TraceStats() = default;
|
|
TraceStats::~TraceStats() = default;
|
|
TraceStats::TraceStats(const TraceStats&) = default;
|
|
TraceStats& TraceStats::operator=(const TraceStats&) = default;
|
|
TraceStats::TraceStats(TraceStats&&) noexcept = default;
|
|
TraceStats& TraceStats::operator=(TraceStats&&) = default;
|
|
|
|
bool TraceStats::operator==(const TraceStats& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& buffer_stats_ == other.buffer_stats_
|
|
&& producers_connected_ == other.producers_connected_
|
|
&& producers_seen_ == other.producers_seen_
|
|
&& data_sources_registered_ == other.data_sources_registered_
|
|
&& data_sources_seen_ == other.data_sources_seen_
|
|
&& tracing_sessions_ == other.tracing_sessions_
|
|
&& total_buffers_ == other.total_buffers_
|
|
&& chunks_discarded_ == other.chunks_discarded_
|
|
&& patches_discarded_ == other.patches_discarded_
|
|
&& invalid_packets_ == other.invalid_packets_;
|
|
}
|
|
|
|
bool TraceStats::ParseFromArray(const void* raw, size_t size) {
|
|
buffer_stats_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* buffer_stats */:
|
|
buffer_stats_.emplace_back();
|
|
buffer_stats_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 2 /* producers_connected */:
|
|
field.get(&producers_connected_);
|
|
break;
|
|
case 3 /* producers_seen */:
|
|
field.get(&producers_seen_);
|
|
break;
|
|
case 4 /* data_sources_registered */:
|
|
field.get(&data_sources_registered_);
|
|
break;
|
|
case 5 /* data_sources_seen */:
|
|
field.get(&data_sources_seen_);
|
|
break;
|
|
case 6 /* tracing_sessions */:
|
|
field.get(&tracing_sessions_);
|
|
break;
|
|
case 7 /* total_buffers */:
|
|
field.get(&total_buffers_);
|
|
break;
|
|
case 8 /* chunks_discarded */:
|
|
field.get(&chunks_discarded_);
|
|
break;
|
|
case 9 /* patches_discarded */:
|
|
field.get(&patches_discarded_);
|
|
break;
|
|
case 10 /* invalid_packets */:
|
|
field.get(&invalid_packets_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceStats::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceStats::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceStats::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: buffer_stats
|
|
for (auto& it : buffer_stats_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: producers_connected
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, producers_connected_);
|
|
}
|
|
|
|
// Field 3: producers_seen
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, producers_seen_);
|
|
}
|
|
|
|
// Field 4: data_sources_registered
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, data_sources_registered_);
|
|
}
|
|
|
|
// Field 5: data_sources_seen
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, data_sources_seen_);
|
|
}
|
|
|
|
// Field 6: tracing_sessions
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, tracing_sessions_);
|
|
}
|
|
|
|
// Field 7: total_buffers
|
|
if (_has_field_[7]) {
|
|
msg->AppendVarInt(7, total_buffers_);
|
|
}
|
|
|
|
// Field 8: chunks_discarded
|
|
if (_has_field_[8]) {
|
|
msg->AppendVarInt(8, chunks_discarded_);
|
|
}
|
|
|
|
// Field 9: patches_discarded
|
|
if (_has_field_[9]) {
|
|
msg->AppendVarInt(9, patches_discarded_);
|
|
}
|
|
|
|
// Field 10: invalid_packets
|
|
if (_has_field_[10]) {
|
|
msg->AppendVarInt(10, invalid_packets_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
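// Illustrative usage sketch (editorial addition, not cppgen_plugin output).
// TraceStats::ParseFromArray() above sends every field id outside its switch
// to unknown_fields_, and Serialize() appends those raw bytes back, so stats
// emitted by a newer tracing service survive a decode/re-encode cycle here.
// The buffer below is a placeholder and the snippet is commented out so the
// amalgamated build is unchanged:
//
//   perfetto::protos::gen::TraceStats stats;
//   std::vector<uint8_t> wire_from_service;  // Filled elsewhere (placeholder).
//   if (stats.ParseFromArray(wire_from_service.data(), wire_from_service.size())) {
//     uint32_t producers = stats.producers_connected();        // Known field.
//     std::vector<uint8_t> roundtrip = stats.SerializeAsArray();  // Keeps unknown fields.
//   }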
|
|
|
|
|
|
TraceStats_BufferStats::TraceStats_BufferStats() = default;
|
|
TraceStats_BufferStats::~TraceStats_BufferStats() = default;
|
|
TraceStats_BufferStats::TraceStats_BufferStats(const TraceStats_BufferStats&) = default;
|
|
TraceStats_BufferStats& TraceStats_BufferStats::operator=(const TraceStats_BufferStats&) = default;
|
|
TraceStats_BufferStats::TraceStats_BufferStats(TraceStats_BufferStats&&) noexcept = default;
|
|
TraceStats_BufferStats& TraceStats_BufferStats::operator=(TraceStats_BufferStats&&) = default;
|
|
|
|
bool TraceStats_BufferStats::operator==(const TraceStats_BufferStats& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& buffer_size_ == other.buffer_size_
|
|
&& bytes_written_ == other.bytes_written_
|
|
&& bytes_overwritten_ == other.bytes_overwritten_
|
|
&& bytes_read_ == other.bytes_read_
|
|
&& padding_bytes_written_ == other.padding_bytes_written_
|
|
&& padding_bytes_cleared_ == other.padding_bytes_cleared_
|
|
&& chunks_written_ == other.chunks_written_
|
|
&& chunks_rewritten_ == other.chunks_rewritten_
|
|
&& chunks_overwritten_ == other.chunks_overwritten_
|
|
&& chunks_discarded_ == other.chunks_discarded_
|
|
&& chunks_read_ == other.chunks_read_
|
|
&& chunks_committed_out_of_order_ == other.chunks_committed_out_of_order_
|
|
&& write_wrap_count_ == other.write_wrap_count_
|
|
&& patches_succeeded_ == other.patches_succeeded_
|
|
&& patches_failed_ == other.patches_failed_
|
|
&& readaheads_succeeded_ == other.readaheads_succeeded_
|
|
&& readaheads_failed_ == other.readaheads_failed_
|
|
&& abi_violations_ == other.abi_violations_
|
|
&& trace_writer_packet_loss_ == other.trace_writer_packet_loss_;
|
|
}
|
|
|
|
bool TraceStats_BufferStats::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 12 /* buffer_size */:
|
|
field.get(&buffer_size_);
|
|
break;
|
|
case 1 /* bytes_written */:
|
|
field.get(&bytes_written_);
|
|
break;
|
|
case 13 /* bytes_overwritten */:
|
|
field.get(&bytes_overwritten_);
|
|
break;
|
|
case 14 /* bytes_read */:
|
|
field.get(&bytes_read_);
|
|
break;
|
|
case 15 /* padding_bytes_written */:
|
|
field.get(&padding_bytes_written_);
|
|
break;
|
|
case 16 /* padding_bytes_cleared */:
|
|
field.get(&padding_bytes_cleared_);
|
|
break;
|
|
case 2 /* chunks_written */:
|
|
field.get(&chunks_written_);
|
|
break;
|
|
case 10 /* chunks_rewritten */:
|
|
field.get(&chunks_rewritten_);
|
|
break;
|
|
case 3 /* chunks_overwritten */:
|
|
field.get(&chunks_overwritten_);
|
|
break;
|
|
case 18 /* chunks_discarded */:
|
|
field.get(&chunks_discarded_);
|
|
break;
|
|
case 17 /* chunks_read */:
|
|
field.get(&chunks_read_);
|
|
break;
|
|
case 11 /* chunks_committed_out_of_order */:
|
|
field.get(&chunks_committed_out_of_order_);
|
|
break;
|
|
case 4 /* write_wrap_count */:
|
|
field.get(&write_wrap_count_);
|
|
break;
|
|
case 5 /* patches_succeeded */:
|
|
field.get(&patches_succeeded_);
|
|
break;
|
|
case 6 /* patches_failed */:
|
|
field.get(&patches_failed_);
|
|
break;
|
|
case 7 /* readaheads_succeeded */:
|
|
field.get(&readaheads_succeeded_);
|
|
break;
|
|
case 8 /* readaheads_failed */:
|
|
field.get(&readaheads_failed_);
|
|
break;
|
|
case 9 /* abi_violations */:
|
|
field.get(&abi_violations_);
|
|
break;
|
|
case 19 /* trace_writer_packet_loss */:
|
|
field.get(&trace_writer_packet_loss_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceStats_BufferStats::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceStats_BufferStats::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceStats_BufferStats::Serialize(::protozero::Message* msg) const {
|
|
// Field 12: buffer_size
|
|
if (_has_field_[12]) {
|
|
msg->AppendVarInt(12, buffer_size_);
|
|
}
|
|
|
|
// Field 1: bytes_written
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, bytes_written_);
|
|
}
|
|
|
|
// Field 13: bytes_overwritten
|
|
if (_has_field_[13]) {
|
|
msg->AppendVarInt(13, bytes_overwritten_);
|
|
}
|
|
|
|
// Field 14: bytes_read
|
|
if (_has_field_[14]) {
|
|
msg->AppendVarInt(14, bytes_read_);
|
|
}
|
|
|
|
// Field 15: padding_bytes_written
|
|
if (_has_field_[15]) {
|
|
msg->AppendVarInt(15, padding_bytes_written_);
|
|
}
|
|
|
|
// Field 16: padding_bytes_cleared
|
|
if (_has_field_[16]) {
|
|
msg->AppendVarInt(16, padding_bytes_cleared_);
|
|
}
|
|
|
|
// Field 2: chunks_written
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, chunks_written_);
|
|
}
|
|
|
|
// Field 10: chunks_rewritten
|
|
if (_has_field_[10]) {
|
|
msg->AppendVarInt(10, chunks_rewritten_);
|
|
}
|
|
|
|
// Field 3: chunks_overwritten
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, chunks_overwritten_);
|
|
}
|
|
|
|
// Field 18: chunks_discarded
|
|
if (_has_field_[18]) {
|
|
msg->AppendVarInt(18, chunks_discarded_);
|
|
}
|
|
|
|
// Field 17: chunks_read
|
|
if (_has_field_[17]) {
|
|
msg->AppendVarInt(17, chunks_read_);
|
|
}
|
|
|
|
// Field 11: chunks_committed_out_of_order
|
|
if (_has_field_[11]) {
|
|
msg->AppendVarInt(11, chunks_committed_out_of_order_);
|
|
}
|
|
|
|
// Field 4: write_wrap_count
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, write_wrap_count_);
|
|
}
|
|
|
|
// Field 5: patches_succeeded
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, patches_succeeded_);
|
|
}
|
|
|
|
// Field 6: patches_failed
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, patches_failed_);
|
|
}
|
|
|
|
// Field 7: readaheads_succeeded
|
|
if (_has_field_[7]) {
|
|
msg->AppendVarInt(7, readaheads_succeeded_);
|
|
}
|
|
|
|
// Field 8: readaheads_failed
|
|
if (_has_field_[8]) {
|
|
msg->AppendVarInt(8, readaheads_failed_);
|
|
}
|
|
|
|
// Field 9: abi_violations
|
|
if (_has_field_[9]) {
|
|
msg->AppendVarInt(9, abi_violations_);
|
|
}
|
|
|
|
// Field 19: trace_writer_packet_loss
|
|
if (_has_field_[19]) {
|
|
msg->AppendVarInt(19, trace_writer_packet_loss_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_capabilities.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/tracing_service_capabilities.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_CAPABILITIES_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_CAPABILITIES_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class TracingServiceCapabilities;
|
|
enum ObservableEvents_Type : int;
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT TracingServiceCapabilities : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kHasQueryCapabilitiesFieldNumber = 1,
|
|
kObservableEventsFieldNumber = 2,
|
|
kHasTraceConfigOutputPathFieldNumber = 3,
|
|
};
|
|
|
|
TracingServiceCapabilities();
|
|
~TracingServiceCapabilities() override;
|
|
TracingServiceCapabilities(TracingServiceCapabilities&&) noexcept;
|
|
TracingServiceCapabilities& operator=(TracingServiceCapabilities&&);
|
|
TracingServiceCapabilities(const TracingServiceCapabilities&);
|
|
TracingServiceCapabilities& operator=(const TracingServiceCapabilities&);
|
|
bool operator==(const TracingServiceCapabilities&) const;
|
|
bool operator!=(const TracingServiceCapabilities& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_has_query_capabilities() const { return _has_field_[1]; }
|
|
bool has_query_capabilities() const { return has_query_capabilities_; }
|
|
void set_has_query_capabilities(bool value) { has_query_capabilities_ = value; _has_field_.set(1); }
|
|
|
|
int observable_events_size() const { return static_cast<int>(observable_events_.size()); }
|
|
const std::vector<ObservableEvents_Type>& observable_events() const { return observable_events_; }
|
|
std::vector<ObservableEvents_Type>* mutable_observable_events() { return &observable_events_; }
|
|
void clear_observable_events() { observable_events_.clear(); }
|
|
void add_observable_events(ObservableEvents_Type value) { observable_events_.emplace_back(value); }
|
|
ObservableEvents_Type* add_observable_events() { observable_events_.emplace_back(); return &observable_events_.back(); }
|
|
|
|
bool has_has_trace_config_output_path() const { return _has_field_[3]; }
|
|
bool has_trace_config_output_path() const { return has_trace_config_output_path_; }
|
|
void set_has_trace_config_output_path(bool value) { has_trace_config_output_path_ = value; _has_field_.set(3); }
|
|
|
|
private:
|
|
bool has_query_capabilities_{};
|
|
std::vector<ObservableEvents_Type> observable_events_;
|
|
bool has_trace_config_output_path_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<4> _has_field_{};
|
|
};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_CAPABILITIES_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
TracingServiceCapabilities::TracingServiceCapabilities() = default;
|
|
TracingServiceCapabilities::~TracingServiceCapabilities() = default;
|
|
TracingServiceCapabilities::TracingServiceCapabilities(const TracingServiceCapabilities&) = default;
|
|
TracingServiceCapabilities& TracingServiceCapabilities::operator=(const TracingServiceCapabilities&) = default;
|
|
TracingServiceCapabilities::TracingServiceCapabilities(TracingServiceCapabilities&&) noexcept = default;
|
|
TracingServiceCapabilities& TracingServiceCapabilities::operator=(TracingServiceCapabilities&&) = default;
|
|
|
|
bool TracingServiceCapabilities::operator==(const TracingServiceCapabilities& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& has_query_capabilities_ == other.has_query_capabilities_
|
|
&& observable_events_ == other.observable_events_
|
|
&& has_trace_config_output_path_ == other.has_trace_config_output_path_;
|
|
}
|
|
|
|
bool TracingServiceCapabilities::ParseFromArray(const void* raw, size_t size) {
|
|
observable_events_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* has_query_capabilities */:
|
|
field.get(&has_query_capabilities_);
|
|
break;
|
|
case 2 /* observable_events */:
|
|
observable_events_.emplace_back();
|
|
field.get(&observable_events_.back());
|
|
break;
|
|
case 3 /* has_trace_config_output_path */:
|
|
field.get(&has_trace_config_output_path_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TracingServiceCapabilities::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TracingServiceCapabilities::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TracingServiceCapabilities::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: has_query_capabilities
|
|
if (_has_field_[1]) {
|
|
msg->AppendTinyVarInt(1, has_query_capabilities_);
|
|
}
|
|
|
|
// Field 2: observable_events
|
|
for (auto& it : observable_events_) {
|
|
msg->AppendVarInt(2, it);
|
|
}
|
|
|
|
// Field 3: has_trace_config_output_path
|
|
if (_has_field_[3]) {
|
|
msg->AppendTinyVarInt(3, has_trace_config_output_path_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_state.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/tracing_service_state.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_STATE_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_STATE_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class TracingServiceState;
|
|
class TracingServiceState_DataSource;
|
|
class DataSourceDescriptor;
|
|
class TracingServiceState_Producer;
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT TracingServiceState : public ::protozero::CppMessageObj {
|
|
public:
|
|
using Producer = TracingServiceState_Producer;
|
|
using DataSource = TracingServiceState_DataSource;
|
|
enum FieldNumbers {
|
|
kProducersFieldNumber = 1,
|
|
kDataSourcesFieldNumber = 2,
|
|
kNumSessionsFieldNumber = 3,
|
|
kNumSessionsStartedFieldNumber = 4,
|
|
};
|
|
|
|
TracingServiceState();
|
|
~TracingServiceState() override;
|
|
TracingServiceState(TracingServiceState&&) noexcept;
|
|
TracingServiceState& operator=(TracingServiceState&&);
|
|
TracingServiceState(const TracingServiceState&);
|
|
TracingServiceState& operator=(const TracingServiceState&);
|
|
bool operator==(const TracingServiceState&) const;
|
|
bool operator!=(const TracingServiceState& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int producers_size() const { return static_cast<int>(producers_.size()); }
|
|
const std::vector<TracingServiceState_Producer>& producers() const { return producers_; }
|
|
std::vector<TracingServiceState_Producer>* mutable_producers() { return &producers_; }
|
|
void clear_producers() { producers_.clear(); }
|
|
TracingServiceState_Producer* add_producers() { producers_.emplace_back(); return &producers_.back(); }
|
|
|
|
int data_sources_size() const { return static_cast<int>(data_sources_.size()); }
|
|
const std::vector<TracingServiceState_DataSource>& data_sources() const { return data_sources_; }
|
|
std::vector<TracingServiceState_DataSource>* mutable_data_sources() { return &data_sources_; }
|
|
void clear_data_sources() { data_sources_.clear(); }
|
|
TracingServiceState_DataSource* add_data_sources() { data_sources_.emplace_back(); return &data_sources_.back(); }
|
|
|
|
bool has_num_sessions() const { return _has_field_[3]; }
|
|
int32_t num_sessions() const { return num_sessions_; }
|
|
void set_num_sessions(int32_t value) { num_sessions_ = value; _has_field_.set(3); }
|
|
|
|
bool has_num_sessions_started() const { return _has_field_[4]; }
|
|
int32_t num_sessions_started() const { return num_sessions_started_; }
|
|
void set_num_sessions_started(int32_t value) { num_sessions_started_ = value; _has_field_.set(4); }
|
|
|
|
private:
|
|
std::vector<TracingServiceState_Producer> producers_;
|
|
std::vector<TracingServiceState_DataSource> data_sources_;
|
|
int32_t num_sessions_{};
|
|
int32_t num_sessions_started_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<5> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT TracingServiceState_DataSource : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kDsDescriptorFieldNumber = 1,
|
|
kProducerIdFieldNumber = 2,
|
|
};
|
|
|
|
TracingServiceState_DataSource();
|
|
~TracingServiceState_DataSource() override;
|
|
TracingServiceState_DataSource(TracingServiceState_DataSource&&) noexcept;
|
|
TracingServiceState_DataSource& operator=(TracingServiceState_DataSource&&);
|
|
TracingServiceState_DataSource(const TracingServiceState_DataSource&);
|
|
TracingServiceState_DataSource& operator=(const TracingServiceState_DataSource&);
|
|
bool operator==(const TracingServiceState_DataSource&) const;
|
|
bool operator!=(const TracingServiceState_DataSource& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_ds_descriptor() const { return _has_field_[1]; }
|
|
const DataSourceDescriptor& ds_descriptor() const { return *ds_descriptor_; }
|
|
DataSourceDescriptor* mutable_ds_descriptor() { _has_field_.set(1); return ds_descriptor_.get(); }
|
|
|
|
bool has_producer_id() const { return _has_field_[2]; }
|
|
int32_t producer_id() const { return producer_id_; }
|
|
void set_producer_id(int32_t value) { producer_id_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
::protozero::CopyablePtr<DataSourceDescriptor> ds_descriptor_;
|
|
int32_t producer_id_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT TracingServiceState_Producer : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kIdFieldNumber = 1,
|
|
kNameFieldNumber = 2,
|
|
kUidFieldNumber = 3,
|
|
};
|
|
|
|
TracingServiceState_Producer();
|
|
~TracingServiceState_Producer() override;
|
|
TracingServiceState_Producer(TracingServiceState_Producer&&) noexcept;
|
|
TracingServiceState_Producer& operator=(TracingServiceState_Producer&&);
|
|
TracingServiceState_Producer(const TracingServiceState_Producer&);
|
|
TracingServiceState_Producer& operator=(const TracingServiceState_Producer&);
|
|
bool operator==(const TracingServiceState_Producer&) const;
|
|
bool operator!=(const TracingServiceState_Producer& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_id() const { return _has_field_[1]; }
|
|
int32_t id() const { return id_; }
|
|
void set_id(int32_t value) { id_ = value; _has_field_.set(1); }
|
|
|
|
bool has_name() const { return _has_field_[2]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
|
|
|
|
bool has_uid() const { return _has_field_[3]; }
|
|
int32_t uid() const { return uid_; }
|
|
void set_uid(int32_t value) { uid_ = value; _has_field_.set(3); }
|
|
|
|
private:
|
|
int32_t id_{};
|
|
std::string name_{};
|
|
int32_t uid_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<4> _has_field_{};
|
|
};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_STATE_PROTO_CPP_H_
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/track_event_descriptor.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class TrackEventDescriptor;
|
|
class TrackEventCategory;
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT TrackEventDescriptor : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kAvailableCategoriesFieldNumber = 1,
|
|
};
|
|
|
|
TrackEventDescriptor();
|
|
~TrackEventDescriptor() override;
|
|
TrackEventDescriptor(TrackEventDescriptor&&) noexcept;
|
|
TrackEventDescriptor& operator=(TrackEventDescriptor&&);
|
|
TrackEventDescriptor(const TrackEventDescriptor&);
|
|
TrackEventDescriptor& operator=(const TrackEventDescriptor&);
|
|
bool operator==(const TrackEventDescriptor&) const;
|
|
bool operator!=(const TrackEventDescriptor& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int available_categories_size() const { return static_cast<int>(available_categories_.size()); }
|
|
const std::vector<TrackEventCategory>& available_categories() const { return available_categories_; }
|
|
std::vector<TrackEventCategory>* mutable_available_categories() { return &available_categories_; }
|
|
void clear_available_categories() { available_categories_.clear(); }
|
|
TrackEventCategory* add_available_categories() { available_categories_.emplace_back(); return &available_categories_.back(); }
|
|
|
|
private:
|
|
std::vector<TrackEventCategory> available_categories_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT TrackEventCategory : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kNameFieldNumber = 1,
|
|
kDescriptionFieldNumber = 2,
|
|
kTagsFieldNumber = 3,
|
|
};
|
|
|
|
TrackEventCategory();
|
|
~TrackEventCategory() override;
|
|
TrackEventCategory(TrackEventCategory&&) noexcept;
|
|
TrackEventCategory& operator=(TrackEventCategory&&);
|
|
TrackEventCategory(const TrackEventCategory&);
|
|
TrackEventCategory& operator=(const TrackEventCategory&);
|
|
bool operator==(const TrackEventCategory&) const;
|
|
bool operator!=(const TrackEventCategory& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_name() const { return _has_field_[1]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
|
|
|
|
bool has_description() const { return _has_field_[2]; }
|
|
const std::string& description() const { return description_; }
|
|
void set_description(const std::string& value) { description_ = value; _has_field_.set(2); }
|
|
|
|
int tags_size() const { return static_cast<int>(tags_.size()); }
|
|
const std::vector<std::string>& tags() const { return tags_; }
|
|
std::vector<std::string>* mutable_tags() { return &tags_; }
|
|
void clear_tags() { tags_.clear(); }
|
|
void add_tags(std::string value) { tags_.emplace_back(value); }
|
|
std::string* add_tags() { tags_.emplace_back(); return &tags_.back(); }
|
|
|
|
private:
|
|
std::string name_{};
|
|
std::string description_{};
|
|
std::vector<std::string> tags_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<4> _has_field_{};
|
|
};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
TracingServiceState::TracingServiceState() = default;
|
|
TracingServiceState::~TracingServiceState() = default;
|
|
TracingServiceState::TracingServiceState(const TracingServiceState&) = default;
|
|
TracingServiceState& TracingServiceState::operator=(const TracingServiceState&) = default;
|
|
TracingServiceState::TracingServiceState(TracingServiceState&&) noexcept = default;
|
|
TracingServiceState& TracingServiceState::operator=(TracingServiceState&&) = default;
|
|
|
|
bool TracingServiceState::operator==(const TracingServiceState& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& producers_ == other.producers_
|
|
&& data_sources_ == other.data_sources_
|
|
&& num_sessions_ == other.num_sessions_
|
|
&& num_sessions_started_ == other.num_sessions_started_;
|
|
}
|
|
|
|
bool TracingServiceState::ParseFromArray(const void* raw, size_t size) {
|
|
producers_.clear();
|
|
data_sources_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* producers */:
|
|
producers_.emplace_back();
|
|
producers_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 2 /* data_sources */:
|
|
data_sources_.emplace_back();
|
|
data_sources_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 3 /* num_sessions */:
|
|
field.get(&num_sessions_);
|
|
break;
|
|
case 4 /* num_sessions_started */:
|
|
field.get(&num_sessions_started_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TracingServiceState::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TracingServiceState::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TracingServiceState::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: producers
|
|
for (auto& it : producers_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: data_sources
|
|
for (auto& it : data_sources_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
// Field 3: num_sessions
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, num_sessions_);
|
|
}
|
|
|
|
// Field 4: num_sessions_started
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, num_sessions_started_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TracingServiceState_DataSource::TracingServiceState_DataSource() = default;
|
|
TracingServiceState_DataSource::~TracingServiceState_DataSource() = default;
|
|
TracingServiceState_DataSource::TracingServiceState_DataSource(const TracingServiceState_DataSource&) = default;
|
|
TracingServiceState_DataSource& TracingServiceState_DataSource::operator=(const TracingServiceState_DataSource&) = default;
|
|
TracingServiceState_DataSource::TracingServiceState_DataSource(TracingServiceState_DataSource&&) noexcept = default;
|
|
TracingServiceState_DataSource& TracingServiceState_DataSource::operator=(TracingServiceState_DataSource&&) = default;
|
|
|
|
bool TracingServiceState_DataSource::operator==(const TracingServiceState_DataSource& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& ds_descriptor_ == other.ds_descriptor_
|
|
&& producer_id_ == other.producer_id_;
|
|
}
|
|
|
|
bool TracingServiceState_DataSource::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* ds_descriptor */:
|
|
(*ds_descriptor_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 2 /* producer_id */:
|
|
field.get(&producer_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TracingServiceState_DataSource::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TracingServiceState_DataSource::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TracingServiceState_DataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: ds_descriptor
|
|
if (_has_field_[1]) {
|
|
(*ds_descriptor_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: producer_id
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, producer_id_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TracingServiceState_Producer::TracingServiceState_Producer() = default;
|
|
TracingServiceState_Producer::~TracingServiceState_Producer() = default;
|
|
TracingServiceState_Producer::TracingServiceState_Producer(const TracingServiceState_Producer&) = default;
|
|
TracingServiceState_Producer& TracingServiceState_Producer::operator=(const TracingServiceState_Producer&) = default;
|
|
TracingServiceState_Producer::TracingServiceState_Producer(TracingServiceState_Producer&&) noexcept = default;
|
|
TracingServiceState_Producer& TracingServiceState_Producer::operator=(TracingServiceState_Producer&&) = default;
|
|
|
|
bool TracingServiceState_Producer::operator==(const TracingServiceState_Producer& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& id_ == other.id_
|
|
&& name_ == other.name_
|
|
&& uid_ == other.uid_;
|
|
}
|
|
|
|
bool TracingServiceState_Producer::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* id */:
|
|
field.get(&id_);
|
|
break;
|
|
case 2 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 3 /* uid */:
|
|
field.get(&uid_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TracingServiceState_Producer::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TracingServiceState_Producer::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TracingServiceState_Producer::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, id_);
|
|
}
|
|
|
|
// Field 2: name
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, name_);
|
|
}
|
|
|
|
// Field 3: uid
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, uid_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/track_event_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
TrackEventDescriptor::TrackEventDescriptor() = default;
|
|
TrackEventDescriptor::~TrackEventDescriptor() = default;
|
|
TrackEventDescriptor::TrackEventDescriptor(const TrackEventDescriptor&) = default;
|
|
TrackEventDescriptor& TrackEventDescriptor::operator=(const TrackEventDescriptor&) = default;
|
|
TrackEventDescriptor::TrackEventDescriptor(TrackEventDescriptor&&) noexcept = default;
|
|
TrackEventDescriptor& TrackEventDescriptor::operator=(TrackEventDescriptor&&) = default;
|
|
|
|
bool TrackEventDescriptor::operator==(const TrackEventDescriptor& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& available_categories_ == other.available_categories_;
|
|
}
|
|
|
|
bool TrackEventDescriptor::ParseFromArray(const void* raw, size_t size) {
|
|
available_categories_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* available_categories */:
|
|
available_categories_.emplace_back();
|
|
available_categories_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TrackEventDescriptor::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TrackEventDescriptor::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TrackEventDescriptor::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: available_categories
|
|
for (auto& it : available_categories_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TrackEventCategory::TrackEventCategory() = default;
|
|
TrackEventCategory::~TrackEventCategory() = default;
|
|
TrackEventCategory::TrackEventCategory(const TrackEventCategory&) = default;
|
|
TrackEventCategory& TrackEventCategory::operator=(const TrackEventCategory&) = default;
|
|
TrackEventCategory::TrackEventCategory(TrackEventCategory&&) noexcept = default;
|
|
TrackEventCategory& TrackEventCategory::operator=(TrackEventCategory&&) = default;
|
|
|
|
bool TrackEventCategory::operator==(const TrackEventCategory& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_ == other.name_
|
|
&& description_ == other.description_
|
|
&& tags_ == other.tags_;
|
|
}
|
|
|
|
bool TrackEventCategory::ParseFromArray(const void* raw, size_t size) {
|
|
tags_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 2 /* description */:
|
|
field.get(&description_);
|
|
break;
|
|
case 3 /* tags */:
|
|
tags_.emplace_back();
|
|
field.get(&tags_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TrackEventCategory::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TrackEventCategory::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TrackEventCategory::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, name_);
|
|
}
|
|
|
|
// Field 2: description
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, description_);
|
|
}
|
|
|
|
// Field 3: tags
|
|
for (auto& it : tags_) {
|
|
msg->AppendString(3, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_log_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/android/android_log_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_LOG_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_LOG_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class AndroidLogConfig;
|
|
enum AndroidLogId : int;
|
|
enum AndroidLogPriority : int;
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT AndroidLogConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kLogIdsFieldNumber = 1,
|
|
kMinPrioFieldNumber = 3,
|
|
kFilterTagsFieldNumber = 4,
|
|
};
|
|
|
|
AndroidLogConfig();
|
|
~AndroidLogConfig() override;
|
|
AndroidLogConfig(AndroidLogConfig&&) noexcept;
|
|
AndroidLogConfig& operator=(AndroidLogConfig&&);
|
|
AndroidLogConfig(const AndroidLogConfig&);
|
|
AndroidLogConfig& operator=(const AndroidLogConfig&);
|
|
bool operator==(const AndroidLogConfig&) const;
|
|
bool operator!=(const AndroidLogConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int log_ids_size() const { return static_cast<int>(log_ids_.size()); }
|
|
const std::vector<AndroidLogId>& log_ids() const { return log_ids_; }
|
|
std::vector<AndroidLogId>* mutable_log_ids() { return &log_ids_; }
|
|
void clear_log_ids() { log_ids_.clear(); }
|
|
void add_log_ids(AndroidLogId value) { log_ids_.emplace_back(value); }
|
|
AndroidLogId* add_log_ids() { log_ids_.emplace_back(); return &log_ids_.back(); }
|
|
|
|
bool has_min_prio() const { return _has_field_[3]; }
|
|
AndroidLogPriority min_prio() const { return min_prio_; }
|
|
void set_min_prio(AndroidLogPriority value) { min_prio_ = value; _has_field_.set(3); }
|
|
|
|
int filter_tags_size() const { return static_cast<int>(filter_tags_.size()); }
|
|
const std::vector<std::string>& filter_tags() const { return filter_tags_; }
|
|
std::vector<std::string>* mutable_filter_tags() { return &filter_tags_; }
|
|
void clear_filter_tags() { filter_tags_.clear(); }
|
|
void add_filter_tags(std::string value) { filter_tags_.emplace_back(value); }
|
|
std::string* add_filter_tags() { filter_tags_.emplace_back(); return &filter_tags_.back(); }
|
|
|
|
private:
|
|
std::vector<AndroidLogId> log_ids_;
|
|
AndroidLogPriority min_prio_{};
|
|
std::vector<std::string> filter_tags_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<5> _has_field_{};
|
|
};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_LOG_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
AndroidLogConfig::AndroidLogConfig() = default;
|
|
AndroidLogConfig::~AndroidLogConfig() = default;
|
|
AndroidLogConfig::AndroidLogConfig(const AndroidLogConfig&) = default;
|
|
AndroidLogConfig& AndroidLogConfig::operator=(const AndroidLogConfig&) = default;
|
|
AndroidLogConfig::AndroidLogConfig(AndroidLogConfig&&) noexcept = default;
|
|
AndroidLogConfig& AndroidLogConfig::operator=(AndroidLogConfig&&) = default;
|
|
|
|
bool AndroidLogConfig::operator==(const AndroidLogConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& log_ids_ == other.log_ids_
|
|
&& min_prio_ == other.min_prio_
|
|
&& filter_tags_ == other.filter_tags_;
|
|
}
|
|
|
|
bool AndroidLogConfig::ParseFromArray(const void* raw, size_t size) {
|
|
log_ids_.clear();
|
|
filter_tags_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* log_ids */:
|
|
log_ids_.emplace_back();
|
|
field.get(&log_ids_.back());
|
|
break;
|
|
case 3 /* min_prio */:
|
|
field.get(&min_prio_);
|
|
break;
|
|
case 4 /* filter_tags */:
|
|
filter_tags_.emplace_back();
|
|
field.get(&filter_tags_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AndroidLogConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AndroidLogConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AndroidLogConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: log_ids
|
|
for (auto& it : log_ids_) {
|
|
msg->AppendVarInt(1, it);
|
|
}
|
|
|
|
// Field 3: min_prio
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, min_prio_);
|
|
}
|
|
|
|
// Field 4: filter_tags
|
|
for (auto& it : filter_tags_) {
|
|
msg->AppendString(4, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_polled_state_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/android/android_polled_state_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_POLLED_STATE_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_POLLED_STATE_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class AndroidPolledStateConfig;
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT AndroidPolledStateConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kPollMsFieldNumber = 1,
|
|
};
|
|
|
|
AndroidPolledStateConfig();
|
|
~AndroidPolledStateConfig() override;
|
|
AndroidPolledStateConfig(AndroidPolledStateConfig&&) noexcept;
|
|
AndroidPolledStateConfig& operator=(AndroidPolledStateConfig&&);
|
|
AndroidPolledStateConfig(const AndroidPolledStateConfig&);
|
|
AndroidPolledStateConfig& operator=(const AndroidPolledStateConfig&);
|
|
bool operator==(const AndroidPolledStateConfig&) const;
|
|
bool operator!=(const AndroidPolledStateConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_poll_ms() const { return _has_field_[1]; }
|
|
uint32_t poll_ms() const { return poll_ms_; }
|
|
void set_poll_ms(uint32_t value) { poll_ms_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
uint32_t poll_ms_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_POLLED_STATE_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
AndroidPolledStateConfig::AndroidPolledStateConfig() = default;
|
|
AndroidPolledStateConfig::~AndroidPolledStateConfig() = default;
|
|
AndroidPolledStateConfig::AndroidPolledStateConfig(const AndroidPolledStateConfig&) = default;
|
|
AndroidPolledStateConfig& AndroidPolledStateConfig::operator=(const AndroidPolledStateConfig&) = default;
|
|
AndroidPolledStateConfig::AndroidPolledStateConfig(AndroidPolledStateConfig&&) noexcept = default;
|
|
AndroidPolledStateConfig& AndroidPolledStateConfig::operator=(AndroidPolledStateConfig&&) = default;
|
|
|
|
bool AndroidPolledStateConfig::operator==(const AndroidPolledStateConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& poll_ms_ == other.poll_ms_;
|
|
}
|
|
|
|
bool AndroidPolledStateConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* poll_ms */:
|
|
field.get(&poll_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AndroidPolledStateConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AndroidPolledStateConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AndroidPolledStateConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: poll_ms
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, poll_ms_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/packages_list_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/android/packages_list_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_PACKAGES_LIST_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_PACKAGES_LIST_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class PackagesListConfig;
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT PackagesListConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kPackageNameFilterFieldNumber = 1,
|
|
};
|
|
|
|
PackagesListConfig();
|
|
~PackagesListConfig() override;
|
|
PackagesListConfig(PackagesListConfig&&) noexcept;
|
|
PackagesListConfig& operator=(PackagesListConfig&&);
|
|
PackagesListConfig(const PackagesListConfig&);
|
|
PackagesListConfig& operator=(const PackagesListConfig&);
|
|
bool operator==(const PackagesListConfig&) const;
|
|
bool operator!=(const PackagesListConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int package_name_filter_size() const { return static_cast<int>(package_name_filter_.size()); }
|
|
const std::vector<std::string>& package_name_filter() const { return package_name_filter_; }
|
|
std::vector<std::string>* mutable_package_name_filter() { return &package_name_filter_; }
|
|
void clear_package_name_filter() { package_name_filter_.clear(); }
|
|
void add_package_name_filter(std::string value) { package_name_filter_.emplace_back(value); }
|
|
std::string* add_package_name_filter() { package_name_filter_.emplace_back(); return &package_name_filter_.back(); }
|
|
|
|
private:
|
|
std::vector<std::string> package_name_filter_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_PACKAGES_LIST_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
PackagesListConfig::PackagesListConfig() = default;
|
|
PackagesListConfig::~PackagesListConfig() = default;
|
|
PackagesListConfig::PackagesListConfig(const PackagesListConfig&) = default;
|
|
PackagesListConfig& PackagesListConfig::operator=(const PackagesListConfig&) = default;
|
|
PackagesListConfig::PackagesListConfig(PackagesListConfig&&) noexcept = default;
|
|
PackagesListConfig& PackagesListConfig::operator=(PackagesListConfig&&) = default;
|
|
|
|
bool PackagesListConfig::operator==(const PackagesListConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& package_name_filter_ == other.package_name_filter_;
|
|
}
|
|
|
|
bool PackagesListConfig::ParseFromArray(const void* raw, size_t size) {
|
|
package_name_filter_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* package_name_filter */:
|
|
package_name_filter_.emplace_back();
|
|
field.get(&package_name_filter_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string PackagesListConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> PackagesListConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void PackagesListConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: package_name_filter
|
|
for (auto& it : package_name_filter_) {
|
|
msg->AppendString(1, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/ftrace/ftrace_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/ftrace/ftrace_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_FTRACE_FTRACE_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_FTRACE_FTRACE_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class FtraceConfig;
|
|
class FtraceConfig_CompactSchedConfig;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT FtraceConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
using CompactSchedConfig = FtraceConfig_CompactSchedConfig;
|
|
enum FieldNumbers {
|
|
kFtraceEventsFieldNumber = 1,
|
|
kAtraceCategoriesFieldNumber = 2,
|
|
kAtraceAppsFieldNumber = 3,
|
|
kBufferSizeKbFieldNumber = 10,
|
|
kDrainPeriodMsFieldNumber = 11,
|
|
kCompactSchedFieldNumber = 12,
|
|
};
|
|
|
|
FtraceConfig();
|
|
~FtraceConfig() override;
|
|
FtraceConfig(FtraceConfig&&) noexcept;
|
|
FtraceConfig& operator=(FtraceConfig&&);
|
|
FtraceConfig(const FtraceConfig&);
|
|
FtraceConfig& operator=(const FtraceConfig&);
|
|
bool operator==(const FtraceConfig&) const;
|
|
bool operator!=(const FtraceConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int ftrace_events_size() const { return static_cast<int>(ftrace_events_.size()); }
|
|
const std::vector<std::string>& ftrace_events() const { return ftrace_events_; }
|
|
std::vector<std::string>* mutable_ftrace_events() { return &ftrace_events_; }
|
|
void clear_ftrace_events() { ftrace_events_.clear(); }
|
|
void add_ftrace_events(std::string value) { ftrace_events_.emplace_back(value); }
|
|
std::string* add_ftrace_events() { ftrace_events_.emplace_back(); return &ftrace_events_.back(); }
|
|
|
|
int atrace_categories_size() const { return static_cast<int>(atrace_categories_.size()); }
|
|
const std::vector<std::string>& atrace_categories() const { return atrace_categories_; }
|
|
std::vector<std::string>* mutable_atrace_categories() { return &atrace_categories_; }
|
|
void clear_atrace_categories() { atrace_categories_.clear(); }
|
|
void add_atrace_categories(std::string value) { atrace_categories_.emplace_back(value); }
|
|
std::string* add_atrace_categories() { atrace_categories_.emplace_back(); return &atrace_categories_.back(); }
|
|
|
|
int atrace_apps_size() const { return static_cast<int>(atrace_apps_.size()); }
|
|
const std::vector<std::string>& atrace_apps() const { return atrace_apps_; }
|
|
std::vector<std::string>* mutable_atrace_apps() { return &atrace_apps_; }
|
|
void clear_atrace_apps() { atrace_apps_.clear(); }
|
|
void add_atrace_apps(std::string value) { atrace_apps_.emplace_back(value); }
|
|
std::string* add_atrace_apps() { atrace_apps_.emplace_back(); return &atrace_apps_.back(); }
|
|
|
|
bool has_buffer_size_kb() const { return _has_field_[10]; }
|
|
uint32_t buffer_size_kb() const { return buffer_size_kb_; }
|
|
void set_buffer_size_kb(uint32_t value) { buffer_size_kb_ = value; _has_field_.set(10); }
|
|
|
|
bool has_drain_period_ms() const { return _has_field_[11]; }
|
|
uint32_t drain_period_ms() const { return drain_period_ms_; }
|
|
void set_drain_period_ms(uint32_t value) { drain_period_ms_ = value; _has_field_.set(11); }
|
|
|
|
bool has_compact_sched() const { return _has_field_[12]; }
|
|
const FtraceConfig_CompactSchedConfig& compact_sched() const { return *compact_sched_; }
|
|
FtraceConfig_CompactSchedConfig* mutable_compact_sched() { _has_field_.set(12); return compact_sched_.get(); }
|
|
|
|
private:
|
|
std::vector<std::string> ftrace_events_;
|
|
std::vector<std::string> atrace_categories_;
|
|
std::vector<std::string> atrace_apps_;
|
|
uint32_t buffer_size_kb_{};
|
|
uint32_t drain_period_ms_{};
|
|
::protozero::CopyablePtr<FtraceConfig_CompactSchedConfig> compact_sched_;
|
|
|
|
// Allows preserving unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<13> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT FtraceConfig_CompactSchedConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kEnabledFieldNumber = 1,
|
|
};
|
|
|
|
FtraceConfig_CompactSchedConfig();
|
|
~FtraceConfig_CompactSchedConfig() override;
|
|
FtraceConfig_CompactSchedConfig(FtraceConfig_CompactSchedConfig&&) noexcept;
|
|
FtraceConfig_CompactSchedConfig& operator=(FtraceConfig_CompactSchedConfig&&);
|
|
FtraceConfig_CompactSchedConfig(const FtraceConfig_CompactSchedConfig&);
|
|
FtraceConfig_CompactSchedConfig& operator=(const FtraceConfig_CompactSchedConfig&);
|
|
bool operator==(const FtraceConfig_CompactSchedConfig&) const;
|
|
bool operator!=(const FtraceConfig_CompactSchedConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_enabled() const { return _has_field_[1]; }
|
|
bool enabled() const { return enabled_; }
|
|
void set_enabled(bool value) { enabled_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
bool enabled_{};
|
|
|
|
// Allows preserving unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_FTRACE_FTRACE_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
FtraceConfig::FtraceConfig() = default;
|
|
FtraceConfig::~FtraceConfig() = default;
|
|
FtraceConfig::FtraceConfig(const FtraceConfig&) = default;
|
|
FtraceConfig& FtraceConfig::operator=(const FtraceConfig&) = default;
|
|
FtraceConfig::FtraceConfig(FtraceConfig&&) noexcept = default;
|
|
FtraceConfig& FtraceConfig::operator=(FtraceConfig&&) = default;
|
|
|
|
bool FtraceConfig::operator==(const FtraceConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& ftrace_events_ == other.ftrace_events_
|
|
&& atrace_categories_ == other.atrace_categories_
|
|
&& atrace_apps_ == other.atrace_apps_
|
|
&& buffer_size_kb_ == other.buffer_size_kb_
|
|
&& drain_period_ms_ == other.drain_period_ms_
|
|
&& compact_sched_ == other.compact_sched_;
|
|
}
|
|
|
|
bool FtraceConfig::ParseFromArray(const void* raw, size_t size) {
|
|
ftrace_events_.clear();
|
|
atrace_categories_.clear();
|
|
atrace_apps_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* ftrace_events */:
|
|
ftrace_events_.emplace_back();
|
|
field.get(&ftrace_events_.back());
|
|
break;
|
|
case 2 /* atrace_categories */:
|
|
atrace_categories_.emplace_back();
|
|
field.get(&atrace_categories_.back());
|
|
break;
|
|
case 3 /* atrace_apps */:
|
|
atrace_apps_.emplace_back();
|
|
field.get(&atrace_apps_.back());
|
|
break;
|
|
case 10 /* buffer_size_kb */:
|
|
field.get(&buffer_size_kb_);
|
|
break;
|
|
case 11 /* drain_period_ms */:
|
|
field.get(&drain_period_ms_);
|
|
break;
|
|
case 12 /* compact_sched */:
|
|
(*compact_sched_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FtraceConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FtraceConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FtraceConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: ftrace_events
|
|
for (auto& it : ftrace_events_) {
|
|
msg->AppendString(1, it);
|
|
}
|
|
|
|
// Field 2: atrace_categories
|
|
for (auto& it : atrace_categories_) {
|
|
msg->AppendString(2, it);
|
|
}
|
|
|
|
// Field 3: atrace_apps
|
|
for (auto& it : atrace_apps_) {
|
|
msg->AppendString(3, it);
|
|
}
|
|
|
|
// Field 10: buffer_size_kb
|
|
if (_has_field_[10]) {
|
|
msg->AppendVarInt(10, buffer_size_kb_);
|
|
}
|
|
|
|
// Field 11: drain_period_ms
|
|
if (_has_field_[11]) {
|
|
msg->AppendVarInt(11, drain_period_ms_);
|
|
}
|
|
|
|
// Field 12: compact_sched
|
|
if (_has_field_[12]) {
|
|
(*compact_sched_).Serialize(msg->BeginNestedMessage<::protozero::Message>(12));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
FtraceConfig_CompactSchedConfig::FtraceConfig_CompactSchedConfig() = default;
|
|
FtraceConfig_CompactSchedConfig::~FtraceConfig_CompactSchedConfig() = default;
|
|
FtraceConfig_CompactSchedConfig::FtraceConfig_CompactSchedConfig(const FtraceConfig_CompactSchedConfig&) = default;
|
|
FtraceConfig_CompactSchedConfig& FtraceConfig_CompactSchedConfig::operator=(const FtraceConfig_CompactSchedConfig&) = default;
|
|
FtraceConfig_CompactSchedConfig::FtraceConfig_CompactSchedConfig(FtraceConfig_CompactSchedConfig&&) noexcept = default;
|
|
FtraceConfig_CompactSchedConfig& FtraceConfig_CompactSchedConfig::operator=(FtraceConfig_CompactSchedConfig&&) = default;
|
|
|
|
bool FtraceConfig_CompactSchedConfig::operator==(const FtraceConfig_CompactSchedConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& enabled_ == other.enabled_;
|
|
}
|
|
|
|
bool FtraceConfig_CompactSchedConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* enabled */:
|
|
field.get(&enabled_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FtraceConfig_CompactSchedConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FtraceConfig_CompactSchedConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FtraceConfig_CompactSchedConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: enabled
|
|
if (_has_field_[1]) {
|
|
msg->AppendTinyVarInt(1, enabled_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
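// Illustrative sketch, not part of the generated code above: how the
// FtraceConfig accessors defined in this section are typically combined to
// build and encode a config. The event name, atrace category and buffer size
// below are arbitrary example values, not defaults taken from Perfetto.
//
//   perfetto::protos::gen::FtraceConfig cfg;
//   cfg.add_ftrace_events("sched/sched_switch");      // repeated field 1
//   cfg.add_atrace_categories("gfx");                 // repeated field 2
//   cfg.set_buffer_size_kb(2048);                     // optional field 10
//   cfg.mutable_compact_sched()->set_enabled(true);   // nested message field 12
//   std::vector<uint8_t> encoded = cfg.SerializeAsArray();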
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/gpu_counter_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/gpu/gpu_counter_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_GPU_COUNTER_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_GPU_COUNTER_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class GpuCounterConfig;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT GpuCounterConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kCounterPeriodNsFieldNumber = 1,
|
|
kCounterIdsFieldNumber = 2,
|
|
kInstrumentedSamplingFieldNumber = 3,
|
|
kFixGpuClockFieldNumber = 4,
|
|
};
|
|
|
|
GpuCounterConfig();
|
|
~GpuCounterConfig() override;
|
|
GpuCounterConfig(GpuCounterConfig&&) noexcept;
|
|
GpuCounterConfig& operator=(GpuCounterConfig&&);
|
|
GpuCounterConfig(const GpuCounterConfig&);
|
|
GpuCounterConfig& operator=(const GpuCounterConfig&);
|
|
bool operator==(const GpuCounterConfig&) const;
|
|
bool operator!=(const GpuCounterConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_counter_period_ns() const { return _has_field_[1]; }
|
|
uint64_t counter_period_ns() const { return counter_period_ns_; }
|
|
void set_counter_period_ns(uint64_t value) { counter_period_ns_ = value; _has_field_.set(1); }
|
|
|
|
int counter_ids_size() const { return static_cast<int>(counter_ids_.size()); }
|
|
const std::vector<uint32_t>& counter_ids() const { return counter_ids_; }
|
|
std::vector<uint32_t>* mutable_counter_ids() { return &counter_ids_; }
|
|
void clear_counter_ids() { counter_ids_.clear(); }
|
|
void add_counter_ids(uint32_t value) { counter_ids_.emplace_back(value); }
|
|
uint32_t* add_counter_ids() { counter_ids_.emplace_back(); return &counter_ids_.back(); }
|
|
|
|
bool has_instrumented_sampling() const { return _has_field_[3]; }
|
|
bool instrumented_sampling() const { return instrumented_sampling_; }
|
|
void set_instrumented_sampling(bool value) { instrumented_sampling_ = value; _has_field_.set(3); }
|
|
|
|
bool has_fix_gpu_clock() const { return _has_field_[4]; }
|
|
bool fix_gpu_clock() const { return fix_gpu_clock_; }
|
|
void set_fix_gpu_clock(bool value) { fix_gpu_clock_ = value; _has_field_.set(4); }
|
|
|
|
private:
|
|
uint64_t counter_period_ns_{};
|
|
std::vector<uint32_t> counter_ids_;
|
|
bool instrumented_sampling_{};
|
|
bool fix_gpu_clock_{};
|
|
|
|
// Allows preserving unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<5> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_GPU_COUNTER_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
GpuCounterConfig::GpuCounterConfig() = default;
|
|
GpuCounterConfig::~GpuCounterConfig() = default;
|
|
GpuCounterConfig::GpuCounterConfig(const GpuCounterConfig&) = default;
|
|
GpuCounterConfig& GpuCounterConfig::operator=(const GpuCounterConfig&) = default;
|
|
GpuCounterConfig::GpuCounterConfig(GpuCounterConfig&&) noexcept = default;
|
|
GpuCounterConfig& GpuCounterConfig::operator=(GpuCounterConfig&&) = default;
|
|
|
|
bool GpuCounterConfig::operator==(const GpuCounterConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& counter_period_ns_ == other.counter_period_ns_
|
|
&& counter_ids_ == other.counter_ids_
|
|
&& instrumented_sampling_ == other.instrumented_sampling_
|
|
&& fix_gpu_clock_ == other.fix_gpu_clock_;
|
|
}
|
|
|
|
bool GpuCounterConfig::ParseFromArray(const void* raw, size_t size) {
|
|
counter_ids_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* counter_period_ns */:
|
|
field.get(&counter_period_ns_);
|
|
break;
|
|
case 2 /* counter_ids */:
|
|
counter_ids_.emplace_back();
|
|
field.get(&counter_ids_.back());
|
|
break;
|
|
case 3 /* instrumented_sampling */:
|
|
field.get(&instrumented_sampling_);
|
|
break;
|
|
case 4 /* fix_gpu_clock */:
|
|
field.get(&fix_gpu_clock_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GpuCounterConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GpuCounterConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GpuCounterConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: counter_period_ns
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, counter_period_ns_);
|
|
}
|
|
|
|
// Field 2: counter_ids
|
|
for (auto& it : counter_ids_) {
|
|
msg->AppendVarInt(2, it);
|
|
}
|
|
|
|
// Field 3: instrumented_sampling
|
|
if (_has_field_[3]) {
|
|
msg->AppendTinyVarInt(3, instrumented_sampling_);
|
|
}
|
|
|
|
// Field 4: fix_gpu_clock
|
|
if (_has_field_[4]) {
|
|
msg->AppendTinyVarInt(4, fix_gpu_clock_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/vulkan_memory_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/gpu/vulkan_memory_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_VULKAN_MEMORY_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_VULKAN_MEMORY_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class VulkanMemoryConfig;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT VulkanMemoryConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kTrackDriverMemoryUsageFieldNumber = 1,
|
|
kTrackDeviceMemoryUsageFieldNumber = 2,
|
|
};
|
|
|
|
VulkanMemoryConfig();
|
|
~VulkanMemoryConfig() override;
|
|
VulkanMemoryConfig(VulkanMemoryConfig&&) noexcept;
|
|
VulkanMemoryConfig& operator=(VulkanMemoryConfig&&);
|
|
VulkanMemoryConfig(const VulkanMemoryConfig&);
|
|
VulkanMemoryConfig& operator=(const VulkanMemoryConfig&);
|
|
bool operator==(const VulkanMemoryConfig&) const;
|
|
bool operator!=(const VulkanMemoryConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_track_driver_memory_usage() const { return _has_field_[1]; }
|
|
bool track_driver_memory_usage() const { return track_driver_memory_usage_; }
|
|
void set_track_driver_memory_usage(bool value) { track_driver_memory_usage_ = value; _has_field_.set(1); }
|
|
|
|
bool has_track_device_memory_usage() const { return _has_field_[2]; }
|
|
bool track_device_memory_usage() const { return track_device_memory_usage_; }
|
|
void set_track_device_memory_usage(bool value) { track_device_memory_usage_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
bool track_driver_memory_usage_{};
|
|
bool track_device_memory_usage_{};
|
|
|
|
// Allows preserving unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_VULKAN_MEMORY_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
VulkanMemoryConfig::VulkanMemoryConfig() = default;
|
|
VulkanMemoryConfig::~VulkanMemoryConfig() = default;
|
|
VulkanMemoryConfig::VulkanMemoryConfig(const VulkanMemoryConfig&) = default;
|
|
VulkanMemoryConfig& VulkanMemoryConfig::operator=(const VulkanMemoryConfig&) = default;
|
|
VulkanMemoryConfig::VulkanMemoryConfig(VulkanMemoryConfig&&) noexcept = default;
|
|
VulkanMemoryConfig& VulkanMemoryConfig::operator=(VulkanMemoryConfig&&) = default;
|
|
|
|
bool VulkanMemoryConfig::operator==(const VulkanMemoryConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& track_driver_memory_usage_ == other.track_driver_memory_usage_
|
|
&& track_device_memory_usage_ == other.track_device_memory_usage_;
|
|
}
|
|
|
|
bool VulkanMemoryConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* track_driver_memory_usage */:
|
|
field.get(&track_driver_memory_usage_);
|
|
break;
|
|
case 2 /* track_device_memory_usage */:
|
|
field.get(&track_device_memory_usage_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string VulkanMemoryConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> VulkanMemoryConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void VulkanMemoryConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: track_driver_memory_usage
|
|
if (_has_field_[1]) {
|
|
msg->AppendTinyVarInt(1, track_driver_memory_usage_);
|
|
}
|
|
|
|
// Field 2: track_device_memory_usage
|
|
if (_has_field_[2]) {
|
|
msg->AppendTinyVarInt(2, track_device_memory_usage_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/inode_file/inode_file_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/inode_file/inode_file_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INODE_FILE_INODE_FILE_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INODE_FILE_INODE_FILE_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class InodeFileConfig;
|
|
class InodeFileConfig_MountPointMappingEntry;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT InodeFileConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
using MountPointMappingEntry = InodeFileConfig_MountPointMappingEntry;
|
|
enum FieldNumbers {
|
|
kScanIntervalMsFieldNumber = 1,
|
|
kScanDelayMsFieldNumber = 2,
|
|
kScanBatchSizeFieldNumber = 3,
|
|
kDoNotScanFieldNumber = 4,
|
|
kScanMountPointsFieldNumber = 5,
|
|
kMountPointMappingFieldNumber = 6,
|
|
};
|
|
|
|
InodeFileConfig();
|
|
~InodeFileConfig() override;
|
|
InodeFileConfig(InodeFileConfig&&) noexcept;
|
|
InodeFileConfig& operator=(InodeFileConfig&&);
|
|
InodeFileConfig(const InodeFileConfig&);
|
|
InodeFileConfig& operator=(const InodeFileConfig&);
|
|
bool operator==(const InodeFileConfig&) const;
|
|
bool operator!=(const InodeFileConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_scan_interval_ms() const { return _has_field_[1]; }
|
|
uint32_t scan_interval_ms() const { return scan_interval_ms_; }
|
|
void set_scan_interval_ms(uint32_t value) { scan_interval_ms_ = value; _has_field_.set(1); }
|
|
|
|
bool has_scan_delay_ms() const { return _has_field_[2]; }
|
|
uint32_t scan_delay_ms() const { return scan_delay_ms_; }
|
|
void set_scan_delay_ms(uint32_t value) { scan_delay_ms_ = value; _has_field_.set(2); }
|
|
|
|
bool has_scan_batch_size() const { return _has_field_[3]; }
|
|
uint32_t scan_batch_size() const { return scan_batch_size_; }
|
|
void set_scan_batch_size(uint32_t value) { scan_batch_size_ = value; _has_field_.set(3); }
|
|
|
|
bool has_do_not_scan() const { return _has_field_[4]; }
|
|
bool do_not_scan() const { return do_not_scan_; }
|
|
void set_do_not_scan(bool value) { do_not_scan_ = value; _has_field_.set(4); }
|
|
|
|
int scan_mount_points_size() const { return static_cast<int>(scan_mount_points_.size()); }
|
|
const std::vector<std::string>& scan_mount_points() const { return scan_mount_points_; }
|
|
std::vector<std::string>* mutable_scan_mount_points() { return &scan_mount_points_; }
|
|
void clear_scan_mount_points() { scan_mount_points_.clear(); }
|
|
void add_scan_mount_points(std::string value) { scan_mount_points_.emplace_back(value); }
|
|
std::string* add_scan_mount_points() { scan_mount_points_.emplace_back(); return &scan_mount_points_.back(); }
|
|
|
|
int mount_point_mapping_size() const { return static_cast<int>(mount_point_mapping_.size()); }
|
|
const std::vector<InodeFileConfig_MountPointMappingEntry>& mount_point_mapping() const { return mount_point_mapping_; }
|
|
std::vector<InodeFileConfig_MountPointMappingEntry>* mutable_mount_point_mapping() { return &mount_point_mapping_; }
|
|
void clear_mount_point_mapping() { mount_point_mapping_.clear(); }
|
|
InodeFileConfig_MountPointMappingEntry* add_mount_point_mapping() { mount_point_mapping_.emplace_back(); return &mount_point_mapping_.back(); }
|
|
|
|
private:
|
|
uint32_t scan_interval_ms_{};
|
|
uint32_t scan_delay_ms_{};
|
|
uint32_t scan_batch_size_{};
|
|
bool do_not_scan_{};
|
|
std::vector<std::string> scan_mount_points_;
|
|
std::vector<InodeFileConfig_MountPointMappingEntry> mount_point_mapping_;
|
|
|
|
// Allows preserving unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<7> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT InodeFileConfig_MountPointMappingEntry : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kMountpointFieldNumber = 1,
|
|
kScanRootsFieldNumber = 2,
|
|
};
|
|
|
|
InodeFileConfig_MountPointMappingEntry();
|
|
~InodeFileConfig_MountPointMappingEntry() override;
|
|
InodeFileConfig_MountPointMappingEntry(InodeFileConfig_MountPointMappingEntry&&) noexcept;
|
|
InodeFileConfig_MountPointMappingEntry& operator=(InodeFileConfig_MountPointMappingEntry&&);
|
|
InodeFileConfig_MountPointMappingEntry(const InodeFileConfig_MountPointMappingEntry&);
|
|
InodeFileConfig_MountPointMappingEntry& operator=(const InodeFileConfig_MountPointMappingEntry&);
|
|
bool operator==(const InodeFileConfig_MountPointMappingEntry&) const;
|
|
bool operator!=(const InodeFileConfig_MountPointMappingEntry& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_mountpoint() const { return _has_field_[1]; }
|
|
const std::string& mountpoint() const { return mountpoint_; }
|
|
void set_mountpoint(const std::string& value) { mountpoint_ = value; _has_field_.set(1); }
|
|
|
|
int scan_roots_size() const { return static_cast<int>(scan_roots_.size()); }
|
|
const std::vector<std::string>& scan_roots() const { return scan_roots_; }
|
|
std::vector<std::string>* mutable_scan_roots() { return &scan_roots_; }
|
|
void clear_scan_roots() { scan_roots_.clear(); }
|
|
void add_scan_roots(std::string value) { scan_roots_.emplace_back(value); }
|
|
std::string* add_scan_roots() { scan_roots_.emplace_back(); return &scan_roots_.back(); }
|
|
|
|
private:
|
|
std::string mountpoint_{};
|
|
std::vector<std::string> scan_roots_;
|
|
|
|
// Allows preserving unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INODE_FILE_INODE_FILE_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
InodeFileConfig::InodeFileConfig() = default;
|
|
InodeFileConfig::~InodeFileConfig() = default;
|
|
InodeFileConfig::InodeFileConfig(const InodeFileConfig&) = default;
|
|
InodeFileConfig& InodeFileConfig::operator=(const InodeFileConfig&) = default;
|
|
InodeFileConfig::InodeFileConfig(InodeFileConfig&&) noexcept = default;
|
|
InodeFileConfig& InodeFileConfig::operator=(InodeFileConfig&&) = default;
|
|
|
|
bool InodeFileConfig::operator==(const InodeFileConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& scan_interval_ms_ == other.scan_interval_ms_
|
|
&& scan_delay_ms_ == other.scan_delay_ms_
|
|
&& scan_batch_size_ == other.scan_batch_size_
|
|
&& do_not_scan_ == other.do_not_scan_
|
|
&& scan_mount_points_ == other.scan_mount_points_
|
|
&& mount_point_mapping_ == other.mount_point_mapping_;
|
|
}
|
|
|
|
bool InodeFileConfig::ParseFromArray(const void* raw, size_t size) {
|
|
scan_mount_points_.clear();
|
|
mount_point_mapping_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* scan_interval_ms */:
|
|
field.get(&scan_interval_ms_);
|
|
break;
|
|
case 2 /* scan_delay_ms */:
|
|
field.get(&scan_delay_ms_);
|
|
break;
|
|
case 3 /* scan_batch_size */:
|
|
field.get(&scan_batch_size_);
|
|
break;
|
|
case 4 /* do_not_scan */:
|
|
field.get(&do_not_scan_);
|
|
break;
|
|
case 5 /* scan_mount_points */:
|
|
scan_mount_points_.emplace_back();
|
|
field.get(&scan_mount_points_.back());
|
|
break;
|
|
case 6 /* mount_point_mapping */:
|
|
mount_point_mapping_.emplace_back();
|
|
mount_point_mapping_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string InodeFileConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> InodeFileConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void InodeFileConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: scan_interval_ms
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, scan_interval_ms_);
|
|
}
|
|
|
|
// Field 2: scan_delay_ms
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, scan_delay_ms_);
|
|
}
|
|
|
|
// Field 3: scan_batch_size
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, scan_batch_size_);
|
|
}
|
|
|
|
// Field 4: do_not_scan
|
|
if (_has_field_[4]) {
|
|
msg->AppendTinyVarInt(4, do_not_scan_);
|
|
}
|
|
|
|
// Field 5: scan_mount_points
|
|
for (auto& it : scan_mount_points_) {
|
|
msg->AppendString(5, it);
|
|
}
|
|
|
|
// Field 6: mount_point_mapping
|
|
for (auto& it : mount_point_mapping_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
InodeFileConfig_MountPointMappingEntry::InodeFileConfig_MountPointMappingEntry() = default;
|
|
InodeFileConfig_MountPointMappingEntry::~InodeFileConfig_MountPointMappingEntry() = default;
|
|
InodeFileConfig_MountPointMappingEntry::InodeFileConfig_MountPointMappingEntry(const InodeFileConfig_MountPointMappingEntry&) = default;
|
|
InodeFileConfig_MountPointMappingEntry& InodeFileConfig_MountPointMappingEntry::operator=(const InodeFileConfig_MountPointMappingEntry&) = default;
|
|
InodeFileConfig_MountPointMappingEntry::InodeFileConfig_MountPointMappingEntry(InodeFileConfig_MountPointMappingEntry&&) noexcept = default;
|
|
InodeFileConfig_MountPointMappingEntry& InodeFileConfig_MountPointMappingEntry::operator=(InodeFileConfig_MountPointMappingEntry&&) = default;
|
|
|
|
bool InodeFileConfig_MountPointMappingEntry::operator==(const InodeFileConfig_MountPointMappingEntry& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& mountpoint_ == other.mountpoint_
|
|
&& scan_roots_ == other.scan_roots_;
|
|
}
|
|
|
|
bool InodeFileConfig_MountPointMappingEntry::ParseFromArray(const void* raw, size_t size) {
|
|
scan_roots_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* mountpoint */:
|
|
field.get(&mountpoint_);
|
|
break;
|
|
case 2 /* scan_roots */:
|
|
scan_roots_.emplace_back();
|
|
field.get(&scan_roots_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string InodeFileConfig_MountPointMappingEntry::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> InodeFileConfig_MountPointMappingEntry::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void InodeFileConfig_MountPointMappingEntry::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: mountpoint
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, mountpoint_);
|
|
}
|
|
|
|
// Field 2: scan_roots
|
|
for (auto& it : scan_roots_) {
|
|
msg->AppendString(2, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
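// Illustrative sketch, not part of the generated code above: populating the
// nested repeated message field of InodeFileConfig via the accessors defined
// in this section. The mount point and scan root paths are arbitrary example
// values.
//
//   perfetto::protos::gen::InodeFileConfig cfg;
//   cfg.set_scan_interval_ms(10000);                  // optional field 1
//   auto* entry = cfg.add_mount_point_mapping();      // repeated message field 6
//   entry->set_mountpoint("/data");
//   entry->add_scan_roots("/data/app");
//   std::string encoded = cfg.SerializeAsString();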
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/power/android_power_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/power/android_power_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_POWER_ANDROID_POWER_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_POWER_ANDROID_POWER_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class AndroidPowerConfig;
|
|
enum AndroidPowerConfig_BatteryCounters : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum AndroidPowerConfig_BatteryCounters : int {
|
|
AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED = 0,
|
|
AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CHARGE = 1,
|
|
AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CAPACITY_PERCENT = 2,
|
|
AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT = 3,
|
|
AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG = 4,
|
|
};
|
|
|
|
class PERFETTO_EXPORT AndroidPowerConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
using BatteryCounters = AndroidPowerConfig_BatteryCounters;
|
|
static constexpr auto BATTERY_COUNTER_UNSPECIFIED = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED;
|
|
static constexpr auto BATTERY_COUNTER_CHARGE = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CHARGE;
|
|
static constexpr auto BATTERY_COUNTER_CAPACITY_PERCENT = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CAPACITY_PERCENT;
|
|
static constexpr auto BATTERY_COUNTER_CURRENT = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT;
|
|
static constexpr auto BATTERY_COUNTER_CURRENT_AVG = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG;
|
|
static constexpr auto BatteryCounters_MIN = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED;
|
|
static constexpr auto BatteryCounters_MAX = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG;
|
|
enum FieldNumbers {
|
|
kBatteryPollMsFieldNumber = 1,
|
|
kBatteryCountersFieldNumber = 2,
|
|
kCollectPowerRailsFieldNumber = 3,
|
|
};
|
|
|
|
AndroidPowerConfig();
|
|
~AndroidPowerConfig() override;
|
|
AndroidPowerConfig(AndroidPowerConfig&&) noexcept;
|
|
AndroidPowerConfig& operator=(AndroidPowerConfig&&);
|
|
AndroidPowerConfig(const AndroidPowerConfig&);
|
|
AndroidPowerConfig& operator=(const AndroidPowerConfig&);
|
|
bool operator==(const AndroidPowerConfig&) const;
|
|
bool operator!=(const AndroidPowerConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_battery_poll_ms() const { return _has_field_[1]; }
|
|
uint32_t battery_poll_ms() const { return battery_poll_ms_; }
|
|
void set_battery_poll_ms(uint32_t value) { battery_poll_ms_ = value; _has_field_.set(1); }
|
|
|
|
int battery_counters_size() const { return static_cast<int>(battery_counters_.size()); }
|
|
const std::vector<AndroidPowerConfig_BatteryCounters>& battery_counters() const { return battery_counters_; }
|
|
std::vector<AndroidPowerConfig_BatteryCounters>* mutable_battery_counters() { return &battery_counters_; }
|
|
void clear_battery_counters() { battery_counters_.clear(); }
|
|
void add_battery_counters(AndroidPowerConfig_BatteryCounters value) { battery_counters_.emplace_back(value); }
|
|
AndroidPowerConfig_BatteryCounters* add_battery_counters() { battery_counters_.emplace_back(); return &battery_counters_.back(); }
|
|
|
|
bool has_collect_power_rails() const { return _has_field_[3]; }
|
|
bool collect_power_rails() const { return collect_power_rails_; }
|
|
void set_collect_power_rails(bool value) { collect_power_rails_ = value; _has_field_.set(3); }
|
|
|
|
private:
|
|
uint32_t battery_poll_ms_{};
|
|
std::vector<AndroidPowerConfig_BatteryCounters> battery_counters_;
|
|
bool collect_power_rails_{};
|
|
|
|
// Allows preserving unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<4> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_POWER_ANDROID_POWER_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
AndroidPowerConfig::AndroidPowerConfig() = default;
|
|
AndroidPowerConfig::~AndroidPowerConfig() = default;
|
|
AndroidPowerConfig::AndroidPowerConfig(const AndroidPowerConfig&) = default;
|
|
AndroidPowerConfig& AndroidPowerConfig::operator=(const AndroidPowerConfig&) = default;
|
|
AndroidPowerConfig::AndroidPowerConfig(AndroidPowerConfig&&) noexcept = default;
|
|
AndroidPowerConfig& AndroidPowerConfig::operator=(AndroidPowerConfig&&) = default;
|
|
|
|
bool AndroidPowerConfig::operator==(const AndroidPowerConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& battery_poll_ms_ == other.battery_poll_ms_
|
|
&& battery_counters_ == other.battery_counters_
|
|
&& collect_power_rails_ == other.collect_power_rails_;
|
|
}
|
|
|
|
bool AndroidPowerConfig::ParseFromArray(const void* raw, size_t size) {
|
|
battery_counters_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* battery_poll_ms */:
|
|
field.get(&battery_poll_ms_);
|
|
break;
|
|
case 2 /* battery_counters */:
|
|
battery_counters_.emplace_back();
|
|
field.get(&battery_counters_.back());
|
|
break;
|
|
case 3 /* collect_power_rails */:
|
|
field.get(&collect_power_rails_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AndroidPowerConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AndroidPowerConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AndroidPowerConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: battery_poll_ms
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, battery_poll_ms_);
|
|
}
|
|
|
|
// Field 2: battery_counters
|
|
for (auto& it : battery_counters_) {
|
|
msg->AppendVarInt(2, it);
|
|
}
|
|
|
|
// Field 3: collect_power_rails
|
|
if (_has_field_[3]) {
|
|
msg->AppendTinyVarInt(3, collect_power_rails_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
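// Illustrative sketch, not part of the generated code above: a serialize /
// parse round trip through AndroidPowerConfig using the accessors defined in
// this section. The poll interval is an arbitrary example value.
//
//   perfetto::protos::gen::AndroidPowerConfig cfg;
//   cfg.set_battery_poll_ms(1000);                    // optional field 1
//   cfg.add_battery_counters(                         // repeated enum field 2
//       perfetto::protos::gen::AndroidPowerConfig::BATTERY_COUNTER_CHARGE);
//   std::vector<uint8_t> encoded = cfg.SerializeAsArray();
//
//   perfetto::protos::gen::AndroidPowerConfig decoded;
//   bool ok = decoded.ParseFromArray(encoded.data(), encoded.size());
//   // On success, `ok` is true and `decoded == cfg` should hold, since the
//   // set fields round-trip through the wire format unchanged.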
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/process_stats/process_stats_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/process_stats/process_stats_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROCESS_STATS_PROCESS_STATS_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROCESS_STATS_PROCESS_STATS_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ProcessStatsConfig;
|
|
enum ProcessStatsConfig_Quirks : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum ProcessStatsConfig_Quirks : int {
|
|
ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED = 0,
|
|
ProcessStatsConfig_Quirks_DISABLE_INITIAL_DUMP = 1,
|
|
ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND = 2,
|
|
};
|
|
|
|
class PERFETTO_EXPORT ProcessStatsConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
using Quirks = ProcessStatsConfig_Quirks;
|
|
static constexpr auto QUIRKS_UNSPECIFIED = ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED;
|
|
static constexpr auto DISABLE_INITIAL_DUMP = ProcessStatsConfig_Quirks_DISABLE_INITIAL_DUMP;
|
|
static constexpr auto DISABLE_ON_DEMAND = ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND;
|
|
static constexpr auto Quirks_MIN = ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED;
|
|
static constexpr auto Quirks_MAX = ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND;
|
|
enum FieldNumbers {
|
|
kQuirksFieldNumber = 1,
|
|
kScanAllProcessesOnStartFieldNumber = 2,
|
|
kRecordThreadNamesFieldNumber = 3,
|
|
kProcStatsPollMsFieldNumber = 4,
|
|
kProcStatsCacheTtlMsFieldNumber = 6,
|
|
kRecordThreadTimeInStateFieldNumber = 7,
|
|
kThreadTimeInStateCacheSizeFieldNumber = 8,
|
|
};
|
|
|
|
ProcessStatsConfig();
|
|
~ProcessStatsConfig() override;
|
|
ProcessStatsConfig(ProcessStatsConfig&&) noexcept;
|
|
ProcessStatsConfig& operator=(ProcessStatsConfig&&);
|
|
ProcessStatsConfig(const ProcessStatsConfig&);
|
|
ProcessStatsConfig& operator=(const ProcessStatsConfig&);
|
|
bool operator==(const ProcessStatsConfig&) const;
|
|
bool operator!=(const ProcessStatsConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int quirks_size() const { return static_cast<int>(quirks_.size()); }
|
|
const std::vector<ProcessStatsConfig_Quirks>& quirks() const { return quirks_; }
|
|
std::vector<ProcessStatsConfig_Quirks>* mutable_quirks() { return &quirks_; }
|
|
void clear_quirks() { quirks_.clear(); }
|
|
void add_quirks(ProcessStatsConfig_Quirks value) { quirks_.emplace_back(value); }
|
|
ProcessStatsConfig_Quirks* add_quirks() { quirks_.emplace_back(); return &quirks_.back(); }
|
|
|
|
bool has_scan_all_processes_on_start() const { return _has_field_[2]; }
|
|
bool scan_all_processes_on_start() const { return scan_all_processes_on_start_; }
|
|
void set_scan_all_processes_on_start(bool value) { scan_all_processes_on_start_ = value; _has_field_.set(2); }
|
|
|
|
bool has_record_thread_names() const { return _has_field_[3]; }
|
|
bool record_thread_names() const { return record_thread_names_; }
|
|
void set_record_thread_names(bool value) { record_thread_names_ = value; _has_field_.set(3); }
|
|
|
|
bool has_proc_stats_poll_ms() const { return _has_field_[4]; }
|
|
uint32_t proc_stats_poll_ms() const { return proc_stats_poll_ms_; }
|
|
void set_proc_stats_poll_ms(uint32_t value) { proc_stats_poll_ms_ = value; _has_field_.set(4); }
|
|
|
|
bool has_proc_stats_cache_ttl_ms() const { return _has_field_[6]; }
|
|
uint32_t proc_stats_cache_ttl_ms() const { return proc_stats_cache_ttl_ms_; }
|
|
void set_proc_stats_cache_ttl_ms(uint32_t value) { proc_stats_cache_ttl_ms_ = value; _has_field_.set(6); }
|
|
|
|
bool has_record_thread_time_in_state() const { return _has_field_[7]; }
|
|
bool record_thread_time_in_state() const { return record_thread_time_in_state_; }
|
|
void set_record_thread_time_in_state(bool value) { record_thread_time_in_state_ = value; _has_field_.set(7); }
|
|
|
|
bool has_thread_time_in_state_cache_size() const { return _has_field_[8]; }
|
|
uint32_t thread_time_in_state_cache_size() const { return thread_time_in_state_cache_size_; }
|
|
void set_thread_time_in_state_cache_size(uint32_t value) { thread_time_in_state_cache_size_ = value; _has_field_.set(8); }
|
|
|
|
private:
|
|
std::vector<ProcessStatsConfig_Quirks> quirks_;
|
|
bool scan_all_processes_on_start_{};
|
|
bool record_thread_names_{};
|
|
uint32_t proc_stats_poll_ms_{};
|
|
uint32_t proc_stats_cache_ttl_ms_{};
|
|
bool record_thread_time_in_state_{};
|
|
uint32_t thread_time_in_state_cache_size_{};
|
|
|
|
// Allows preserving unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<9> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROCESS_STATS_PROCESS_STATS_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ProcessStatsConfig::ProcessStatsConfig() = default;
|
|
ProcessStatsConfig::~ProcessStatsConfig() = default;
|
|
ProcessStatsConfig::ProcessStatsConfig(const ProcessStatsConfig&) = default;
|
|
ProcessStatsConfig& ProcessStatsConfig::operator=(const ProcessStatsConfig&) = default;
|
|
ProcessStatsConfig::ProcessStatsConfig(ProcessStatsConfig&&) noexcept = default;
|
|
ProcessStatsConfig& ProcessStatsConfig::operator=(ProcessStatsConfig&&) = default;
|
|
|
|
bool ProcessStatsConfig::operator==(const ProcessStatsConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& quirks_ == other.quirks_
|
|
&& scan_all_processes_on_start_ == other.scan_all_processes_on_start_
|
|
&& record_thread_names_ == other.record_thread_names_
|
|
&& proc_stats_poll_ms_ == other.proc_stats_poll_ms_
|
|
&& proc_stats_cache_ttl_ms_ == other.proc_stats_cache_ttl_ms_
|
|
&& record_thread_time_in_state_ == other.record_thread_time_in_state_
|
|
&& thread_time_in_state_cache_size_ == other.thread_time_in_state_cache_size_;
|
|
}
|
|
|
|
bool ProcessStatsConfig::ParseFromArray(const void* raw, size_t size) {
|
|
quirks_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* quirks */:
|
|
quirks_.emplace_back();
|
|
field.get(&quirks_.back());
|
|
break;
|
|
case 2 /* scan_all_processes_on_start */:
|
|
field.get(&scan_all_processes_on_start_);
|
|
break;
|
|
case 3 /* record_thread_names */:
|
|
field.get(&record_thread_names_);
|
|
break;
|
|
case 4 /* proc_stats_poll_ms */:
|
|
field.get(&proc_stats_poll_ms_);
|
|
break;
|
|
case 6 /* proc_stats_cache_ttl_ms */:
|
|
field.get(&proc_stats_cache_ttl_ms_);
|
|
break;
|
|
case 7 /* record_thread_time_in_state */:
|
|
field.get(&record_thread_time_in_state_);
|
|
break;
|
|
case 8 /* thread_time_in_state_cache_size */:
|
|
field.get(&thread_time_in_state_cache_size_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ProcessStatsConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ProcessStatsConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ProcessStatsConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: quirks
|
|
for (auto& it : quirks_) {
|
|
msg->AppendVarInt(1, it);
|
|
}
|
|
|
|
// Field 2: scan_all_processes_on_start
|
|
if (_has_field_[2]) {
|
|
msg->AppendTinyVarInt(2, scan_all_processes_on_start_);
|
|
}
|
|
|
|
// Field 3: record_thread_names
|
|
if (_has_field_[3]) {
|
|
msg->AppendTinyVarInt(3, record_thread_names_);
|
|
}
|
|
|
|
// Field 4: proc_stats_poll_ms
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, proc_stats_poll_ms_);
|
|
}
|
|
|
|
// Field 6: proc_stats_cache_ttl_ms
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, proc_stats_cache_ttl_ms_);
|
|
}
|
|
|
|
// Field 7: record_thread_time_in_state
|
|
if (_has_field_[7]) {
|
|
msg->AppendTinyVarInt(7, record_thread_time_in_state_);
|
|
}
|
|
|
|
// Field 8: thread_time_in_state_cache_size
|
|
if (_has_field_[8]) {
|
|
msg->AppendVarInt(8, thread_time_in_state_cache_size_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/heapprofd_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/heapprofd_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_HEAPPROFD_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_HEAPPROFD_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class HeapprofdConfig;
|
|
class HeapprofdConfig_ContinuousDumpConfig;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT HeapprofdConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
using ContinuousDumpConfig = HeapprofdConfig_ContinuousDumpConfig;
|
|
enum FieldNumbers {
|
|
kSamplingIntervalBytesFieldNumber = 1,
|
|
kProcessCmdlineFieldNumber = 2,
|
|
kPidFieldNumber = 4,
|
|
kHeapsFieldNumber = 20,
|
|
kAllHeapsFieldNumber = 21,
|
|
kAllFieldNumber = 5,
|
|
kMinAnonymousMemoryKbFieldNumber = 15,
|
|
kMaxHeapprofdMemoryKbFieldNumber = 16,
|
|
kMaxHeapprofdCpuSecsFieldNumber = 17,
|
|
kSkipSymbolPrefixFieldNumber = 7,
|
|
kContinuousDumpConfigFieldNumber = 6,
|
|
kShmemSizeBytesFieldNumber = 8,
|
|
kBlockClientFieldNumber = 9,
|
|
kBlockClientTimeoutUsFieldNumber = 14,
|
|
kNoStartupFieldNumber = 10,
|
|
kNoRunningFieldNumber = 11,
|
|
kIdleAllocationsFieldNumber = 12,
|
|
kDumpAtMaxFieldNumber = 13,
|
|
kDisableForkTeardownFieldNumber = 18,
|
|
kDisableVforkDetectionFieldNumber = 19,
|
|
};
|
|
|
|
HeapprofdConfig();
|
|
~HeapprofdConfig() override;
|
|
HeapprofdConfig(HeapprofdConfig&&) noexcept;
|
|
HeapprofdConfig& operator=(HeapprofdConfig&&);
|
|
HeapprofdConfig(const HeapprofdConfig&);
|
|
HeapprofdConfig& operator=(const HeapprofdConfig&);
|
|
bool operator==(const HeapprofdConfig&) const;
|
|
bool operator!=(const HeapprofdConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_sampling_interval_bytes() const { return _has_field_[1]; }
|
|
uint64_t sampling_interval_bytes() const { return sampling_interval_bytes_; }
|
|
void set_sampling_interval_bytes(uint64_t value) { sampling_interval_bytes_ = value; _has_field_.set(1); }
|
|
|
|
int process_cmdline_size() const { return static_cast<int>(process_cmdline_.size()); }
|
|
const std::vector<std::string>& process_cmdline() const { return process_cmdline_; }
|
|
std::vector<std::string>* mutable_process_cmdline() { return &process_cmdline_; }
|
|
void clear_process_cmdline() { process_cmdline_.clear(); }
|
|
void add_process_cmdline(std::string value) { process_cmdline_.emplace_back(value); }
|
|
std::string* add_process_cmdline() { process_cmdline_.emplace_back(); return &process_cmdline_.back(); }
|
|
|
|
int pid_size() const { return static_cast<int>(pid_.size()); }
|
|
const std::vector<uint64_t>& pid() const { return pid_; }
|
|
std::vector<uint64_t>* mutable_pid() { return &pid_; }
|
|
void clear_pid() { pid_.clear(); }
|
|
void add_pid(uint64_t value) { pid_.emplace_back(value); }
|
|
uint64_t* add_pid() { pid_.emplace_back(); return &pid_.back(); }
|
|
|
|
int heaps_size() const { return static_cast<int>(heaps_.size()); }
|
|
const std::vector<std::string>& heaps() const { return heaps_; }
|
|
std::vector<std::string>* mutable_heaps() { return &heaps_; }
|
|
void clear_heaps() { heaps_.clear(); }
|
|
void add_heaps(std::string value) { heaps_.emplace_back(value); }
|
|
std::string* add_heaps() { heaps_.emplace_back(); return &heaps_.back(); }
|
|
|
|
bool has_all_heaps() const { return _has_field_[21]; }
|
|
bool all_heaps() const { return all_heaps_; }
|
|
void set_all_heaps(bool value) { all_heaps_ = value; _has_field_.set(21); }
|
|
|
|
bool has_all() const { return _has_field_[5]; }
|
|
bool all() const { return all_; }
|
|
void set_all(bool value) { all_ = value; _has_field_.set(5); }
|
|
|
|
bool has_min_anonymous_memory_kb() const { return _has_field_[15]; }
|
|
uint32_t min_anonymous_memory_kb() const { return min_anonymous_memory_kb_; }
|
|
void set_min_anonymous_memory_kb(uint32_t value) { min_anonymous_memory_kb_ = value; _has_field_.set(15); }
|
|
|
|
bool has_max_heapprofd_memory_kb() const { return _has_field_[16]; }
|
|
uint32_t max_heapprofd_memory_kb() const { return max_heapprofd_memory_kb_; }
|
|
void set_max_heapprofd_memory_kb(uint32_t value) { max_heapprofd_memory_kb_ = value; _has_field_.set(16); }
|
|
|
|
bool has_max_heapprofd_cpu_secs() const { return _has_field_[17]; }
|
|
uint64_t max_heapprofd_cpu_secs() const { return max_heapprofd_cpu_secs_; }
|
|
void set_max_heapprofd_cpu_secs(uint64_t value) { max_heapprofd_cpu_secs_ = value; _has_field_.set(17); }
|
|
|
|
int skip_symbol_prefix_size() const { return static_cast<int>(skip_symbol_prefix_.size()); }
|
|
const std::vector<std::string>& skip_symbol_prefix() const { return skip_symbol_prefix_; }
|
|
std::vector<std::string>* mutable_skip_symbol_prefix() { return &skip_symbol_prefix_; }
|
|
void clear_skip_symbol_prefix() { skip_symbol_prefix_.clear(); }
|
|
void add_skip_symbol_prefix(std::string value) { skip_symbol_prefix_.emplace_back(value); }
|
|
std::string* add_skip_symbol_prefix() { skip_symbol_prefix_.emplace_back(); return &skip_symbol_prefix_.back(); }
|
|
|
|
bool has_continuous_dump_config() const { return _has_field_[6]; }
|
|
const HeapprofdConfig_ContinuousDumpConfig& continuous_dump_config() const { return *continuous_dump_config_; }
|
|
HeapprofdConfig_ContinuousDumpConfig* mutable_continuous_dump_config() { _has_field_.set(6); return continuous_dump_config_.get(); }
|
|
|
|
bool has_shmem_size_bytes() const { return _has_field_[8]; }
|
|
uint64_t shmem_size_bytes() const { return shmem_size_bytes_; }
|
|
void set_shmem_size_bytes(uint64_t value) { shmem_size_bytes_ = value; _has_field_.set(8); }
|
|
|
|
bool has_block_client() const { return _has_field_[9]; }
|
|
bool block_client() const { return block_client_; }
|
|
void set_block_client(bool value) { block_client_ = value; _has_field_.set(9); }
|
|
|
|
bool has_block_client_timeout_us() const { return _has_field_[14]; }
|
|
uint32_t block_client_timeout_us() const { return block_client_timeout_us_; }
|
|
void set_block_client_timeout_us(uint32_t value) { block_client_timeout_us_ = value; _has_field_.set(14); }
|
|
|
|
bool has_no_startup() const { return _has_field_[10]; }
|
|
bool no_startup() const { return no_startup_; }
|
|
void set_no_startup(bool value) { no_startup_ = value; _has_field_.set(10); }
|
|
|
|
bool has_no_running() const { return _has_field_[11]; }
|
|
bool no_running() const { return no_running_; }
|
|
void set_no_running(bool value) { no_running_ = value; _has_field_.set(11); }
|
|
|
|
bool has_idle_allocations() const { return _has_field_[12]; }
|
|
bool idle_allocations() const { return idle_allocations_; }
|
|
void set_idle_allocations(bool value) { idle_allocations_ = value; _has_field_.set(12); }
|
|
|
|
bool has_dump_at_max() const { return _has_field_[13]; }
|
|
bool dump_at_max() const { return dump_at_max_; }
|
|
void set_dump_at_max(bool value) { dump_at_max_ = value; _has_field_.set(13); }
|
|
|
|
bool has_disable_fork_teardown() const { return _has_field_[18]; }
|
|
bool disable_fork_teardown() const { return disable_fork_teardown_; }
|
|
void set_disable_fork_teardown(bool value) { disable_fork_teardown_ = value; _has_field_.set(18); }
|
|
|
|
bool has_disable_vfork_detection() const { return _has_field_[19]; }
|
|
bool disable_vfork_detection() const { return disable_vfork_detection_; }
|
|
void set_disable_vfork_detection(bool value) { disable_vfork_detection_ = value; _has_field_.set(19); }
|
|
|
|
private:
|
|
uint64_t sampling_interval_bytes_{};
|
|
std::vector<std::string> process_cmdline_;
|
|
std::vector<uint64_t> pid_;
|
|
std::vector<std::string> heaps_;
|
|
bool all_heaps_{};
|
|
bool all_{};
|
|
uint32_t min_anonymous_memory_kb_{};
|
|
uint32_t max_heapprofd_memory_kb_{};
|
|
uint64_t max_heapprofd_cpu_secs_{};
|
|
std::vector<std::string> skip_symbol_prefix_;
|
|
::protozero::CopyablePtr<HeapprofdConfig_ContinuousDumpConfig> continuous_dump_config_;
|
|
uint64_t shmem_size_bytes_{};
|
|
bool block_client_{};
|
|
uint32_t block_client_timeout_us_{};
|
|
bool no_startup_{};
|
|
bool no_running_{};
|
|
bool idle_allocations_{};
|
|
bool dump_at_max_{};
|
|
bool disable_fork_teardown_{};
|
|
bool disable_vfork_detection_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<22> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT HeapprofdConfig_ContinuousDumpConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kDumpPhaseMsFieldNumber = 5,
|
|
kDumpIntervalMsFieldNumber = 6,
|
|
};
|
|
|
|
HeapprofdConfig_ContinuousDumpConfig();
|
|
~HeapprofdConfig_ContinuousDumpConfig() override;
|
|
HeapprofdConfig_ContinuousDumpConfig(HeapprofdConfig_ContinuousDumpConfig&&) noexcept;
|
|
HeapprofdConfig_ContinuousDumpConfig& operator=(HeapprofdConfig_ContinuousDumpConfig&&);
|
|
HeapprofdConfig_ContinuousDumpConfig(const HeapprofdConfig_ContinuousDumpConfig&);
|
|
HeapprofdConfig_ContinuousDumpConfig& operator=(const HeapprofdConfig_ContinuousDumpConfig&);
|
|
bool operator==(const HeapprofdConfig_ContinuousDumpConfig&) const;
|
|
bool operator!=(const HeapprofdConfig_ContinuousDumpConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_dump_phase_ms() const { return _has_field_[5]; }
|
|
uint32_t dump_phase_ms() const { return dump_phase_ms_; }
|
|
void set_dump_phase_ms(uint32_t value) { dump_phase_ms_ = value; _has_field_.set(5); }
|
|
|
|
bool has_dump_interval_ms() const { return _has_field_[6]; }
|
|
uint32_t dump_interval_ms() const { return dump_interval_ms_; }
|
|
void set_dump_interval_ms(uint32_t value) { dump_interval_ms_ = value; _has_field_.set(6); }
|
|
|
|
private:
|
|
uint32_t dump_phase_ms_{};
|
|
uint32_t dump_interval_ms_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<7> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_HEAPPROFD_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
HeapprofdConfig::HeapprofdConfig() = default;
|
|
HeapprofdConfig::~HeapprofdConfig() = default;
|
|
HeapprofdConfig::HeapprofdConfig(const HeapprofdConfig&) = default;
|
|
HeapprofdConfig& HeapprofdConfig::operator=(const HeapprofdConfig&) = default;
|
|
HeapprofdConfig::HeapprofdConfig(HeapprofdConfig&&) noexcept = default;
|
|
HeapprofdConfig& HeapprofdConfig::operator=(HeapprofdConfig&&) = default;
|
|
|
|
bool HeapprofdConfig::operator==(const HeapprofdConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& sampling_interval_bytes_ == other.sampling_interval_bytes_
|
|
&& process_cmdline_ == other.process_cmdline_
|
|
&& pid_ == other.pid_
|
|
&& heaps_ == other.heaps_
|
|
&& all_heaps_ == other.all_heaps_
|
|
&& all_ == other.all_
|
|
&& min_anonymous_memory_kb_ == other.min_anonymous_memory_kb_
|
|
&& max_heapprofd_memory_kb_ == other.max_heapprofd_memory_kb_
|
|
&& max_heapprofd_cpu_secs_ == other.max_heapprofd_cpu_secs_
|
|
&& skip_symbol_prefix_ == other.skip_symbol_prefix_
|
|
&& continuous_dump_config_ == other.continuous_dump_config_
|
|
&& shmem_size_bytes_ == other.shmem_size_bytes_
|
|
&& block_client_ == other.block_client_
|
|
&& block_client_timeout_us_ == other.block_client_timeout_us_
|
|
&& no_startup_ == other.no_startup_
|
|
&& no_running_ == other.no_running_
|
|
&& idle_allocations_ == other.idle_allocations_
|
|
&& dump_at_max_ == other.dump_at_max_
|
|
&& disable_fork_teardown_ == other.disable_fork_teardown_
|
|
&& disable_vfork_detection_ == other.disable_vfork_detection_;
|
|
}
|
|
|
|
bool HeapprofdConfig::ParseFromArray(const void* raw, size_t size) {
|
|
process_cmdline_.clear();
|
|
pid_.clear();
|
|
heaps_.clear();
|
|
skip_symbol_prefix_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* sampling_interval_bytes */:
|
|
field.get(&sampling_interval_bytes_);
|
|
break;
|
|
case 2 /* process_cmdline */:
|
|
process_cmdline_.emplace_back();
|
|
field.get(&process_cmdline_.back());
|
|
break;
|
|
case 4 /* pid */:
|
|
pid_.emplace_back();
|
|
field.get(&pid_.back());
|
|
break;
|
|
case 20 /* heaps */:
|
|
heaps_.emplace_back();
|
|
field.get(&heaps_.back());
|
|
break;
|
|
case 21 /* all_heaps */:
|
|
field.get(&all_heaps_);
|
|
break;
|
|
case 5 /* all */:
|
|
field.get(&all_);
|
|
break;
|
|
case 15 /* min_anonymous_memory_kb */:
|
|
field.get(&min_anonymous_memory_kb_);
|
|
break;
|
|
case 16 /* max_heapprofd_memory_kb */:
|
|
field.get(&max_heapprofd_memory_kb_);
|
|
break;
|
|
case 17 /* max_heapprofd_cpu_secs */:
|
|
field.get(&max_heapprofd_cpu_secs_);
|
|
break;
|
|
case 7 /* skip_symbol_prefix */:
|
|
skip_symbol_prefix_.emplace_back();
|
|
field.get(&skip_symbol_prefix_.back());
|
|
break;
|
|
case 6 /* continuous_dump_config */:
|
|
(*continuous_dump_config_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 8 /* shmem_size_bytes */:
|
|
field.get(&shmem_size_bytes_);
|
|
break;
|
|
case 9 /* block_client */:
|
|
field.get(&block_client_);
|
|
break;
|
|
case 14 /* block_client_timeout_us */:
|
|
field.get(&block_client_timeout_us_);
|
|
break;
|
|
case 10 /* no_startup */:
|
|
field.get(&no_startup_);
|
|
break;
|
|
case 11 /* no_running */:
|
|
field.get(&no_running_);
|
|
break;
|
|
case 12 /* idle_allocations */:
|
|
field.get(&idle_allocations_);
|
|
break;
|
|
case 13 /* dump_at_max */:
|
|
field.get(&dump_at_max_);
|
|
break;
|
|
case 18 /* disable_fork_teardown */:
|
|
field.get(&disable_fork_teardown_);
|
|
break;
|
|
case 19 /* disable_vfork_detection */:
|
|
field.get(&disable_vfork_detection_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string HeapprofdConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> HeapprofdConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void HeapprofdConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: sampling_interval_bytes
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, sampling_interval_bytes_);
|
|
}
|
|
|
|
// Field 2: process_cmdline
|
|
for (auto& it : process_cmdline_) {
|
|
msg->AppendString(2, it);
|
|
}
|
|
|
|
// Field 4: pid
|
|
for (auto& it : pid_) {
|
|
msg->AppendVarInt(4, it);
|
|
}
|
|
|
|
// Field 20: heaps
|
|
for (auto& it : heaps_) {
|
|
msg->AppendString(20, it);
|
|
}
|
|
|
|
// Field 21: all_heaps
|
|
if (_has_field_[21]) {
|
|
msg->AppendTinyVarInt(21, all_heaps_);
|
|
}
|
|
|
|
// Field 5: all
|
|
if (_has_field_[5]) {
|
|
msg->AppendTinyVarInt(5, all_);
|
|
}
|
|
|
|
// Field 15: min_anonymous_memory_kb
|
|
if (_has_field_[15]) {
|
|
msg->AppendVarInt(15, min_anonymous_memory_kb_);
|
|
}
|
|
|
|
// Field 16: max_heapprofd_memory_kb
|
|
if (_has_field_[16]) {
|
|
msg->AppendVarInt(16, max_heapprofd_memory_kb_);
|
|
}
|
|
|
|
// Field 17: max_heapprofd_cpu_secs
|
|
if (_has_field_[17]) {
|
|
msg->AppendVarInt(17, max_heapprofd_cpu_secs_);
|
|
}
|
|
|
|
// Field 7: skip_symbol_prefix
|
|
for (auto& it : skip_symbol_prefix_) {
|
|
msg->AppendString(7, it);
|
|
}
|
|
|
|
// Field 6: continuous_dump_config
|
|
if (_has_field_[6]) {
|
|
(*continuous_dump_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
// Field 8: shmem_size_bytes
|
|
if (_has_field_[8]) {
|
|
msg->AppendVarInt(8, shmem_size_bytes_);
|
|
}
|
|
|
|
// Field 9: block_client
|
|
if (_has_field_[9]) {
|
|
msg->AppendTinyVarInt(9, block_client_);
|
|
}
|
|
|
|
// Field 14: block_client_timeout_us
|
|
if (_has_field_[14]) {
|
|
msg->AppendVarInt(14, block_client_timeout_us_);
|
|
}
|
|
|
|
// Field 10: no_startup
|
|
if (_has_field_[10]) {
|
|
msg->AppendTinyVarInt(10, no_startup_);
|
|
}
|
|
|
|
// Field 11: no_running
|
|
if (_has_field_[11]) {
|
|
msg->AppendTinyVarInt(11, no_running_);
|
|
}
|
|
|
|
// Field 12: idle_allocations
|
|
if (_has_field_[12]) {
|
|
msg->AppendTinyVarInt(12, idle_allocations_);
|
|
}
|
|
|
|
// Field 13: dump_at_max
|
|
if (_has_field_[13]) {
|
|
msg->AppendTinyVarInt(13, dump_at_max_);
|
|
}
|
|
|
|
// Field 18: disable_fork_teardown
|
|
if (_has_field_[18]) {
|
|
msg->AppendTinyVarInt(18, disable_fork_teardown_);
|
|
}
|
|
|
|
// Field 19: disable_vfork_detection
|
|
if (_has_field_[19]) {
|
|
msg->AppendTinyVarInt(19, disable_vfork_detection_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
HeapprofdConfig_ContinuousDumpConfig::HeapprofdConfig_ContinuousDumpConfig() = default;
|
|
HeapprofdConfig_ContinuousDumpConfig::~HeapprofdConfig_ContinuousDumpConfig() = default;
|
|
HeapprofdConfig_ContinuousDumpConfig::HeapprofdConfig_ContinuousDumpConfig(const HeapprofdConfig_ContinuousDumpConfig&) = default;
|
|
HeapprofdConfig_ContinuousDumpConfig& HeapprofdConfig_ContinuousDumpConfig::operator=(const HeapprofdConfig_ContinuousDumpConfig&) = default;
|
|
HeapprofdConfig_ContinuousDumpConfig::HeapprofdConfig_ContinuousDumpConfig(HeapprofdConfig_ContinuousDumpConfig&&) noexcept = default;
|
|
HeapprofdConfig_ContinuousDumpConfig& HeapprofdConfig_ContinuousDumpConfig::operator=(HeapprofdConfig_ContinuousDumpConfig&&) = default;
|
|
|
|
bool HeapprofdConfig_ContinuousDumpConfig::operator==(const HeapprofdConfig_ContinuousDumpConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& dump_phase_ms_ == other.dump_phase_ms_
|
|
&& dump_interval_ms_ == other.dump_interval_ms_;
|
|
}
|
|
|
|
bool HeapprofdConfig_ContinuousDumpConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 5 /* dump_phase_ms */:
|
|
field.get(&dump_phase_ms_);
|
|
break;
|
|
case 6 /* dump_interval_ms */:
|
|
field.get(&dump_interval_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string HeapprofdConfig_ContinuousDumpConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> HeapprofdConfig_ContinuousDumpConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void HeapprofdConfig_ContinuousDumpConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 5: dump_phase_ms
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, dump_phase_ms_);
|
|
}
|
|
|
|
// Field 6: dump_interval_ms
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, dump_interval_ms_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/java_hprof_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/java_hprof_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_JAVA_HPROF_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_JAVA_HPROF_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class JavaHprofConfig;
|
|
class JavaHprofConfig_ContinuousDumpConfig;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT JavaHprofConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
using ContinuousDumpConfig = JavaHprofConfig_ContinuousDumpConfig;
|
|
enum FieldNumbers {
|
|
kProcessCmdlineFieldNumber = 1,
|
|
kPidFieldNumber = 2,
|
|
kContinuousDumpConfigFieldNumber = 3,
|
|
kMinAnonymousMemoryKbFieldNumber = 4,
|
|
kDumpSmapsFieldNumber = 5,
|
|
};
|
|
|
|
JavaHprofConfig();
|
|
~JavaHprofConfig() override;
|
|
JavaHprofConfig(JavaHprofConfig&&) noexcept;
|
|
JavaHprofConfig& operator=(JavaHprofConfig&&);
|
|
JavaHprofConfig(const JavaHprofConfig&);
|
|
JavaHprofConfig& operator=(const JavaHprofConfig&);
|
|
bool operator==(const JavaHprofConfig&) const;
|
|
bool operator!=(const JavaHprofConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int process_cmdline_size() const { return static_cast<int>(process_cmdline_.size()); }
|
|
const std::vector<std::string>& process_cmdline() const { return process_cmdline_; }
|
|
std::vector<std::string>* mutable_process_cmdline() { return &process_cmdline_; }
|
|
void clear_process_cmdline() { process_cmdline_.clear(); }
|
|
void add_process_cmdline(std::string value) { process_cmdline_.emplace_back(value); }
|
|
std::string* add_process_cmdline() { process_cmdline_.emplace_back(); return &process_cmdline_.back(); }
|
|
|
|
int pid_size() const { return static_cast<int>(pid_.size()); }
|
|
const std::vector<uint64_t>& pid() const { return pid_; }
|
|
std::vector<uint64_t>* mutable_pid() { return &pid_; }
|
|
void clear_pid() { pid_.clear(); }
|
|
void add_pid(uint64_t value) { pid_.emplace_back(value); }
|
|
uint64_t* add_pid() { pid_.emplace_back(); return &pid_.back(); }
|
|
|
|
bool has_continuous_dump_config() const { return _has_field_[3]; }
|
|
const JavaHprofConfig_ContinuousDumpConfig& continuous_dump_config() const { return *continuous_dump_config_; }
|
|
JavaHprofConfig_ContinuousDumpConfig* mutable_continuous_dump_config() { _has_field_.set(3); return continuous_dump_config_.get(); }
|
|
|
|
bool has_min_anonymous_memory_kb() const { return _has_field_[4]; }
|
|
uint32_t min_anonymous_memory_kb() const { return min_anonymous_memory_kb_; }
|
|
void set_min_anonymous_memory_kb(uint32_t value) { min_anonymous_memory_kb_ = value; _has_field_.set(4); }
|
|
|
|
bool has_dump_smaps() const { return _has_field_[5]; }
|
|
bool dump_smaps() const { return dump_smaps_; }
|
|
void set_dump_smaps(bool value) { dump_smaps_ = value; _has_field_.set(5); }
|
|
|
|
private:
|
|
std::vector<std::string> process_cmdline_;
|
|
std::vector<uint64_t> pid_;
|
|
::protozero::CopyablePtr<JavaHprofConfig_ContinuousDumpConfig> continuous_dump_config_;
|
|
uint32_t min_anonymous_memory_kb_{};
|
|
bool dump_smaps_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<6> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT JavaHprofConfig_ContinuousDumpConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kDumpPhaseMsFieldNumber = 1,
|
|
kDumpIntervalMsFieldNumber = 2,
|
|
};
|
|
|
|
JavaHprofConfig_ContinuousDumpConfig();
|
|
~JavaHprofConfig_ContinuousDumpConfig() override;
|
|
JavaHprofConfig_ContinuousDumpConfig(JavaHprofConfig_ContinuousDumpConfig&&) noexcept;
|
|
JavaHprofConfig_ContinuousDumpConfig& operator=(JavaHprofConfig_ContinuousDumpConfig&&);
|
|
JavaHprofConfig_ContinuousDumpConfig(const JavaHprofConfig_ContinuousDumpConfig&);
|
|
JavaHprofConfig_ContinuousDumpConfig& operator=(const JavaHprofConfig_ContinuousDumpConfig&);
|
|
bool operator==(const JavaHprofConfig_ContinuousDumpConfig&) const;
|
|
bool operator!=(const JavaHprofConfig_ContinuousDumpConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_dump_phase_ms() const { return _has_field_[1]; }
|
|
uint32_t dump_phase_ms() const { return dump_phase_ms_; }
|
|
void set_dump_phase_ms(uint32_t value) { dump_phase_ms_ = value; _has_field_.set(1); }
|
|
|
|
bool has_dump_interval_ms() const { return _has_field_[2]; }
|
|
uint32_t dump_interval_ms() const { return dump_interval_ms_; }
|
|
void set_dump_interval_ms(uint32_t value) { dump_interval_ms_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
uint32_t dump_phase_ms_{};
|
|
uint32_t dump_interval_ms_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_JAVA_HPROF_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
JavaHprofConfig::JavaHprofConfig() = default;
|
|
JavaHprofConfig::~JavaHprofConfig() = default;
|
|
JavaHprofConfig::JavaHprofConfig(const JavaHprofConfig&) = default;
|
|
JavaHprofConfig& JavaHprofConfig::operator=(const JavaHprofConfig&) = default;
|
|
JavaHprofConfig::JavaHprofConfig(JavaHprofConfig&&) noexcept = default;
|
|
JavaHprofConfig& JavaHprofConfig::operator=(JavaHprofConfig&&) = default;
|
|
|
|
bool JavaHprofConfig::operator==(const JavaHprofConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& process_cmdline_ == other.process_cmdline_
|
|
&& pid_ == other.pid_
|
|
&& continuous_dump_config_ == other.continuous_dump_config_
|
|
&& min_anonymous_memory_kb_ == other.min_anonymous_memory_kb_
|
|
&& dump_smaps_ == other.dump_smaps_;
|
|
}
|
|
|
|
bool JavaHprofConfig::ParseFromArray(const void* raw, size_t size) {
|
|
process_cmdline_.clear();
|
|
pid_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* process_cmdline */:
|
|
process_cmdline_.emplace_back();
|
|
field.get(&process_cmdline_.back());
|
|
break;
|
|
case 2 /* pid */:
|
|
pid_.emplace_back();
|
|
field.get(&pid_.back());
|
|
break;
|
|
case 3 /* continuous_dump_config */:
|
|
(*continuous_dump_config_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 4 /* min_anonymous_memory_kb */:
|
|
field.get(&min_anonymous_memory_kb_);
|
|
break;
|
|
case 5 /* dump_smaps */:
|
|
field.get(&dump_smaps_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string JavaHprofConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> JavaHprofConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void JavaHprofConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: process_cmdline
|
|
for (auto& it : process_cmdline_) {
|
|
msg->AppendString(1, it);
|
|
}
|
|
|
|
// Field 2: pid
|
|
for (auto& it : pid_) {
|
|
msg->AppendVarInt(2, it);
|
|
}
|
|
|
|
// Field 3: continuous_dump_config
|
|
if (_has_field_[3]) {
|
|
(*continuous_dump_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
|
|
}
|
|
|
|
// Field 4: min_anonymous_memory_kb
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, min_anonymous_memory_kb_);
|
|
}
|
|
|
|
// Field 5: dump_smaps
|
|
if (_has_field_[5]) {
|
|
msg->AppendTinyVarInt(5, dump_smaps_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
JavaHprofConfig_ContinuousDumpConfig::JavaHprofConfig_ContinuousDumpConfig() = default;
|
|
JavaHprofConfig_ContinuousDumpConfig::~JavaHprofConfig_ContinuousDumpConfig() = default;
|
|
JavaHprofConfig_ContinuousDumpConfig::JavaHprofConfig_ContinuousDumpConfig(const JavaHprofConfig_ContinuousDumpConfig&) = default;
|
|
JavaHprofConfig_ContinuousDumpConfig& JavaHprofConfig_ContinuousDumpConfig::operator=(const JavaHprofConfig_ContinuousDumpConfig&) = default;
|
|
JavaHprofConfig_ContinuousDumpConfig::JavaHprofConfig_ContinuousDumpConfig(JavaHprofConfig_ContinuousDumpConfig&&) noexcept = default;
|
|
JavaHprofConfig_ContinuousDumpConfig& JavaHprofConfig_ContinuousDumpConfig::operator=(JavaHprofConfig_ContinuousDumpConfig&&) = default;
|
|
|
|
bool JavaHprofConfig_ContinuousDumpConfig::operator==(const JavaHprofConfig_ContinuousDumpConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& dump_phase_ms_ == other.dump_phase_ms_
|
|
&& dump_interval_ms_ == other.dump_interval_ms_;
|
|
}
|
|
|
|
bool JavaHprofConfig_ContinuousDumpConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* dump_phase_ms */:
|
|
field.get(&dump_phase_ms_);
|
|
break;
|
|
case 2 /* dump_interval_ms */:
|
|
field.get(&dump_interval_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string JavaHprofConfig_ContinuousDumpConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> JavaHprofConfig_ContinuousDumpConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void JavaHprofConfig_ContinuousDumpConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: dump_phase_ms
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, dump_phase_ms_);
|
|
}
|
|
|
|
// Field 2: dump_interval_ms
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, dump_interval_ms_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/perf_event_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/perf_event_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_PERF_EVENT_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_PERF_EVENT_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class PerfEventConfig;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT PerfEventConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kAllCpusFieldNumber = 1,
|
|
kSamplingFrequencyFieldNumber = 2,
|
|
kRingBufferReadPeriodMsFieldNumber = 8,
|
|
kRingBufferPagesFieldNumber = 3,
|
|
kTargetPidFieldNumber = 4,
|
|
kTargetCmdlineFieldNumber = 5,
|
|
kExcludePidFieldNumber = 6,
|
|
kExcludeCmdlineFieldNumber = 7,
|
|
kRemoteDescriptorTimeoutMsFieldNumber = 9,
|
|
kUnwindStateClearPeriodMsFieldNumber = 10,
|
|
};
|
|
|
|
PerfEventConfig();
|
|
~PerfEventConfig() override;
|
|
PerfEventConfig(PerfEventConfig&&) noexcept;
|
|
PerfEventConfig& operator=(PerfEventConfig&&);
|
|
PerfEventConfig(const PerfEventConfig&);
|
|
PerfEventConfig& operator=(const PerfEventConfig&);
|
|
bool operator==(const PerfEventConfig&) const;
|
|
bool operator!=(const PerfEventConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_all_cpus() const { return _has_field_[1]; }
|
|
bool all_cpus() const { return all_cpus_; }
|
|
void set_all_cpus(bool value) { all_cpus_ = value; _has_field_.set(1); }
|
|
|
|
bool has_sampling_frequency() const { return _has_field_[2]; }
|
|
uint32_t sampling_frequency() const { return sampling_frequency_; }
|
|
void set_sampling_frequency(uint32_t value) { sampling_frequency_ = value; _has_field_.set(2); }
|
|
|
|
bool has_ring_buffer_read_period_ms() const { return _has_field_[8]; }
|
|
uint32_t ring_buffer_read_period_ms() const { return ring_buffer_read_period_ms_; }
|
|
void set_ring_buffer_read_period_ms(uint32_t value) { ring_buffer_read_period_ms_ = value; _has_field_.set(8); }
|
|
|
|
bool has_ring_buffer_pages() const { return _has_field_[3]; }
|
|
uint32_t ring_buffer_pages() const { return ring_buffer_pages_; }
|
|
void set_ring_buffer_pages(uint32_t value) { ring_buffer_pages_ = value; _has_field_.set(3); }
|
|
|
|
int target_pid_size() const { return static_cast<int>(target_pid_.size()); }
|
|
const std::vector<int32_t>& target_pid() const { return target_pid_; }
|
|
std::vector<int32_t>* mutable_target_pid() { return &target_pid_; }
|
|
void clear_target_pid() { target_pid_.clear(); }
|
|
void add_target_pid(int32_t value) { target_pid_.emplace_back(value); }
|
|
int32_t* add_target_pid() { target_pid_.emplace_back(); return &target_pid_.back(); }
|
|
|
|
int target_cmdline_size() const { return static_cast<int>(target_cmdline_.size()); }
|
|
const std::vector<std::string>& target_cmdline() const { return target_cmdline_; }
|
|
std::vector<std::string>* mutable_target_cmdline() { return &target_cmdline_; }
|
|
void clear_target_cmdline() { target_cmdline_.clear(); }
|
|
void add_target_cmdline(std::string value) { target_cmdline_.emplace_back(value); }
|
|
std::string* add_target_cmdline() { target_cmdline_.emplace_back(); return &target_cmdline_.back(); }
|
|
|
|
int exclude_pid_size() const { return static_cast<int>(exclude_pid_.size()); }
|
|
const std::vector<int32_t>& exclude_pid() const { return exclude_pid_; }
|
|
std::vector<int32_t>* mutable_exclude_pid() { return &exclude_pid_; }
|
|
void clear_exclude_pid() { exclude_pid_.clear(); }
|
|
void add_exclude_pid(int32_t value) { exclude_pid_.emplace_back(value); }
|
|
int32_t* add_exclude_pid() { exclude_pid_.emplace_back(); return &exclude_pid_.back(); }
|
|
|
|
int exclude_cmdline_size() const { return static_cast<int>(exclude_cmdline_.size()); }
|
|
const std::vector<std::string>& exclude_cmdline() const { return exclude_cmdline_; }
|
|
std::vector<std::string>* mutable_exclude_cmdline() { return &exclude_cmdline_; }
|
|
void clear_exclude_cmdline() { exclude_cmdline_.clear(); }
|
|
void add_exclude_cmdline(std::string value) { exclude_cmdline_.emplace_back(value); }
|
|
std::string* add_exclude_cmdline() { exclude_cmdline_.emplace_back(); return &exclude_cmdline_.back(); }
|
|
|
|
bool has_remote_descriptor_timeout_ms() const { return _has_field_[9]; }
|
|
uint32_t remote_descriptor_timeout_ms() const { return remote_descriptor_timeout_ms_; }
|
|
void set_remote_descriptor_timeout_ms(uint32_t value) { remote_descriptor_timeout_ms_ = value; _has_field_.set(9); }
|
|
|
|
bool has_unwind_state_clear_period_ms() const { return _has_field_[10]; }
|
|
uint32_t unwind_state_clear_period_ms() const { return unwind_state_clear_period_ms_; }
|
|
void set_unwind_state_clear_period_ms(uint32_t value) { unwind_state_clear_period_ms_ = value; _has_field_.set(10); }
|
|
|
|
private:
|
|
bool all_cpus_{};
|
|
uint32_t sampling_frequency_{};
|
|
uint32_t ring_buffer_read_period_ms_{};
|
|
uint32_t ring_buffer_pages_{};
|
|
std::vector<int32_t> target_pid_;
|
|
std::vector<std::string> target_cmdline_;
|
|
std::vector<int32_t> exclude_pid_;
|
|
std::vector<std::string> exclude_cmdline_;
|
|
uint32_t remote_descriptor_timeout_ms_{};
|
|
uint32_t unwind_state_clear_period_ms_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<11> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_PERF_EVENT_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
PerfEventConfig::PerfEventConfig() = default;
|
|
PerfEventConfig::~PerfEventConfig() = default;
|
|
PerfEventConfig::PerfEventConfig(const PerfEventConfig&) = default;
|
|
PerfEventConfig& PerfEventConfig::operator=(const PerfEventConfig&) = default;
|
|
PerfEventConfig::PerfEventConfig(PerfEventConfig&&) noexcept = default;
|
|
PerfEventConfig& PerfEventConfig::operator=(PerfEventConfig&&) = default;
|
|
|
|
bool PerfEventConfig::operator==(const PerfEventConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& all_cpus_ == other.all_cpus_
|
|
&& sampling_frequency_ == other.sampling_frequency_
|
|
&& ring_buffer_read_period_ms_ == other.ring_buffer_read_period_ms_
|
|
&& ring_buffer_pages_ == other.ring_buffer_pages_
|
|
&& target_pid_ == other.target_pid_
|
|
&& target_cmdline_ == other.target_cmdline_
|
|
&& exclude_pid_ == other.exclude_pid_
|
|
&& exclude_cmdline_ == other.exclude_cmdline_
|
|
&& remote_descriptor_timeout_ms_ == other.remote_descriptor_timeout_ms_
|
|
&& unwind_state_clear_period_ms_ == other.unwind_state_clear_period_ms_;
|
|
}
|
|
|
|
bool PerfEventConfig::ParseFromArray(const void* raw, size_t size) {
|
|
target_pid_.clear();
|
|
target_cmdline_.clear();
|
|
exclude_pid_.clear();
|
|
exclude_cmdline_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* all_cpus */:
|
|
field.get(&all_cpus_);
|
|
break;
|
|
case 2 /* sampling_frequency */:
|
|
field.get(&sampling_frequency_);
|
|
break;
|
|
case 8 /* ring_buffer_read_period_ms */:
|
|
field.get(&ring_buffer_read_period_ms_);
|
|
break;
|
|
case 3 /* ring_buffer_pages */:
|
|
field.get(&ring_buffer_pages_);
|
|
break;
|
|
case 4 /* target_pid */:
|
|
target_pid_.emplace_back();
|
|
field.get(&target_pid_.back());
|
|
break;
|
|
case 5 /* target_cmdline */:
|
|
target_cmdline_.emplace_back();
|
|
field.get(&target_cmdline_.back());
|
|
break;
|
|
case 6 /* exclude_pid */:
|
|
exclude_pid_.emplace_back();
|
|
field.get(&exclude_pid_.back());
|
|
break;
|
|
case 7 /* exclude_cmdline */:
|
|
exclude_cmdline_.emplace_back();
|
|
field.get(&exclude_cmdline_.back());
|
|
break;
|
|
case 9 /* remote_descriptor_timeout_ms */:
|
|
field.get(&remote_descriptor_timeout_ms_);
|
|
break;
|
|
case 10 /* unwind_state_clear_period_ms */:
|
|
field.get(&unwind_state_clear_period_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string PerfEventConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> PerfEventConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void PerfEventConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: all_cpus
|
|
if (_has_field_[1]) {
|
|
msg->AppendTinyVarInt(1, all_cpus_);
|
|
}
|
|
|
|
// Field 2: sampling_frequency
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, sampling_frequency_);
|
|
}
|
|
|
|
// Field 8: ring_buffer_read_period_ms
|
|
if (_has_field_[8]) {
|
|
msg->AppendVarInt(8, ring_buffer_read_period_ms_);
|
|
}
|
|
|
|
// Field 3: ring_buffer_pages
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, ring_buffer_pages_);
|
|
}
|
|
|
|
// Field 4: target_pid
|
|
for (auto& it : target_pid_) {
|
|
msg->AppendVarInt(4, it);
|
|
}
|
|
|
|
// Field 5: target_cmdline
|
|
for (auto& it : target_cmdline_) {
|
|
msg->AppendString(5, it);
|
|
}
|
|
|
|
// Field 6: exclude_pid
|
|
for (auto& it : exclude_pid_) {
|
|
msg->AppendVarInt(6, it);
|
|
}
|
|
|
|
// Field 7: exclude_cmdline
|
|
for (auto& it : exclude_cmdline_) {
|
|
msg->AppendString(7, it);
|
|
}
|
|
|
|
// Field 9: remote_descriptor_timeout_ms
|
|
if (_has_field_[9]) {
|
|
msg->AppendVarInt(9, remote_descriptor_timeout_ms_);
|
|
}
|
|
|
|
// Field 10: unwind_state_clear_period_ms
|
|
if (_has_field_[10]) {
|
|
msg->AppendVarInt(10, unwind_state_clear_period_ms_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/sys_stats/sys_stats_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/sys_stats/sys_stats_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_SYS_STATS_SYS_STATS_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_SYS_STATS_SYS_STATS_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class SysStatsConfig;
|
|
enum SysStatsConfig_StatCounters : int;
|
|
enum MeminfoCounters : int;
|
|
enum VmstatCounters : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum SysStatsConfig_StatCounters : int {
|
|
SysStatsConfig_StatCounters_STAT_UNSPECIFIED = 0,
|
|
SysStatsConfig_StatCounters_STAT_CPU_TIMES = 1,
|
|
SysStatsConfig_StatCounters_STAT_IRQ_COUNTS = 2,
|
|
SysStatsConfig_StatCounters_STAT_SOFTIRQ_COUNTS = 3,
|
|
SysStatsConfig_StatCounters_STAT_FORK_COUNT = 4,
|
|
};
|
|
|
|
class PERFETTO_EXPORT SysStatsConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
using StatCounters = SysStatsConfig_StatCounters;
|
|
static constexpr auto STAT_UNSPECIFIED = SysStatsConfig_StatCounters_STAT_UNSPECIFIED;
|
|
static constexpr auto STAT_CPU_TIMES = SysStatsConfig_StatCounters_STAT_CPU_TIMES;
|
|
static constexpr auto STAT_IRQ_COUNTS = SysStatsConfig_StatCounters_STAT_IRQ_COUNTS;
|
|
static constexpr auto STAT_SOFTIRQ_COUNTS = SysStatsConfig_StatCounters_STAT_SOFTIRQ_COUNTS;
|
|
static constexpr auto STAT_FORK_COUNT = SysStatsConfig_StatCounters_STAT_FORK_COUNT;
|
|
static constexpr auto StatCounters_MIN = SysStatsConfig_StatCounters_STAT_UNSPECIFIED;
|
|
static constexpr auto StatCounters_MAX = SysStatsConfig_StatCounters_STAT_FORK_COUNT;
|
|
enum FieldNumbers {
|
|
kMeminfoPeriodMsFieldNumber = 1,
|
|
kMeminfoCountersFieldNumber = 2,
|
|
kVmstatPeriodMsFieldNumber = 3,
|
|
kVmstatCountersFieldNumber = 4,
|
|
kStatPeriodMsFieldNumber = 5,
|
|
kStatCountersFieldNumber = 6,
|
|
};
|
|
|
|
SysStatsConfig();
|
|
~SysStatsConfig() override;
|
|
SysStatsConfig(SysStatsConfig&&) noexcept;
|
|
SysStatsConfig& operator=(SysStatsConfig&&);
|
|
SysStatsConfig(const SysStatsConfig&);
|
|
SysStatsConfig& operator=(const SysStatsConfig&);
|
|
bool operator==(const SysStatsConfig&) const;
|
|
bool operator!=(const SysStatsConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_meminfo_period_ms() const { return _has_field_[1]; }
|
|
uint32_t meminfo_period_ms() const { return meminfo_period_ms_; }
|
|
void set_meminfo_period_ms(uint32_t value) { meminfo_period_ms_ = value; _has_field_.set(1); }
|
|
|
|
int meminfo_counters_size() const { return static_cast<int>(meminfo_counters_.size()); }
|
|
const std::vector<MeminfoCounters>& meminfo_counters() const { return meminfo_counters_; }
|
|
std::vector<MeminfoCounters>* mutable_meminfo_counters() { return &meminfo_counters_; }
|
|
void clear_meminfo_counters() { meminfo_counters_.clear(); }
|
|
void add_meminfo_counters(MeminfoCounters value) { meminfo_counters_.emplace_back(value); }
|
|
MeminfoCounters* add_meminfo_counters() { meminfo_counters_.emplace_back(); return &meminfo_counters_.back(); }
|
|
|
|
bool has_vmstat_period_ms() const { return _has_field_[3]; }
|
|
uint32_t vmstat_period_ms() const { return vmstat_period_ms_; }
|
|
void set_vmstat_period_ms(uint32_t value) { vmstat_period_ms_ = value; _has_field_.set(3); }
|
|
|
|
int vmstat_counters_size() const { return static_cast<int>(vmstat_counters_.size()); }
|
|
const std::vector<VmstatCounters>& vmstat_counters() const { return vmstat_counters_; }
|
|
std::vector<VmstatCounters>* mutable_vmstat_counters() { return &vmstat_counters_; }
|
|
void clear_vmstat_counters() { vmstat_counters_.clear(); }
|
|
void add_vmstat_counters(VmstatCounters value) { vmstat_counters_.emplace_back(value); }
|
|
VmstatCounters* add_vmstat_counters() { vmstat_counters_.emplace_back(); return &vmstat_counters_.back(); }
|
|
|
|
bool has_stat_period_ms() const { return _has_field_[5]; }
|
|
uint32_t stat_period_ms() const { return stat_period_ms_; }
|
|
void set_stat_period_ms(uint32_t value) { stat_period_ms_ = value; _has_field_.set(5); }
|
|
|
|
int stat_counters_size() const { return static_cast<int>(stat_counters_.size()); }
|
|
const std::vector<SysStatsConfig_StatCounters>& stat_counters() const { return stat_counters_; }
|
|
std::vector<SysStatsConfig_StatCounters>* mutable_stat_counters() { return &stat_counters_; }
|
|
void clear_stat_counters() { stat_counters_.clear(); }
|
|
void add_stat_counters(SysStatsConfig_StatCounters value) { stat_counters_.emplace_back(value); }
|
|
SysStatsConfig_StatCounters* add_stat_counters() { stat_counters_.emplace_back(); return &stat_counters_.back(); }
|
|
|
|
private:
|
|
uint32_t meminfo_period_ms_{};
|
|
std::vector<MeminfoCounters> meminfo_counters_;
|
|
uint32_t vmstat_period_ms_{};
|
|
std::vector<VmstatCounters> vmstat_counters_;
|
|
uint32_t stat_period_ms_{};
|
|
std::vector<SysStatsConfig_StatCounters> stat_counters_;
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<7> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_SYS_STATS_SYS_STATS_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"

namespace perfetto {
namespace protos {
namespace gen {

SysStatsConfig::SysStatsConfig() = default;
SysStatsConfig::~SysStatsConfig() = default;
SysStatsConfig::SysStatsConfig(const SysStatsConfig&) = default;
SysStatsConfig& SysStatsConfig::operator=(const SysStatsConfig&) = default;
SysStatsConfig::SysStatsConfig(SysStatsConfig&&) noexcept = default;
SysStatsConfig& SysStatsConfig::operator=(SysStatsConfig&&) = default;

bool SysStatsConfig::operator==(const SysStatsConfig& other) const {
  return unknown_fields_ == other.unknown_fields_
   && meminfo_period_ms_ == other.meminfo_period_ms_
   && meminfo_counters_ == other.meminfo_counters_
   && vmstat_period_ms_ == other.vmstat_period_ms_
   && vmstat_counters_ == other.vmstat_counters_
   && stat_period_ms_ == other.stat_period_ms_
   && stat_counters_ == other.stat_counters_;
}

bool SysStatsConfig::ParseFromArray(const void* raw, size_t size) {
  meminfo_counters_.clear();
  vmstat_counters_.clear();
  stat_counters_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* meminfo_period_ms */:
        field.get(&meminfo_period_ms_);
        break;
      case 2 /* meminfo_counters */:
        meminfo_counters_.emplace_back();
        field.get(&meminfo_counters_.back());
        break;
      case 3 /* vmstat_period_ms */:
        field.get(&vmstat_period_ms_);
        break;
      case 4 /* vmstat_counters */:
        vmstat_counters_.emplace_back();
        field.get(&vmstat_counters_.back());
        break;
      case 5 /* stat_period_ms */:
        field.get(&stat_period_ms_);
        break;
      case 6 /* stat_counters */:
        stat_counters_.emplace_back();
        field.get(&stat_counters_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string SysStatsConfig::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> SysStatsConfig::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void SysStatsConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: meminfo_period_ms
  if (_has_field_[1]) {
    msg->AppendVarInt(1, meminfo_period_ms_);
  }

  // Field 2: meminfo_counters
  for (auto& it : meminfo_counters_) {
    msg->AppendVarInt(2, it);
  }

  // Field 3: vmstat_period_ms
  if (_has_field_[3]) {
    msg->AppendVarInt(3, vmstat_period_ms_);
  }

  // Field 4: vmstat_counters
  for (auto& it : vmstat_counters_) {
    msg->AppendVarInt(4, it);
  }

  // Field 5: stat_period_ms
  if (_has_field_[5]) {
    msg->AppendVarInt(5, stat_period_ms_);
  }

  // Field 6: stat_counters
  for (auto& it : stat_counters_) {
    msg->AppendVarInt(6, it);
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}

} // namespace perfetto
} // namespace protos
} // namespace gen
#pragma GCC diagnostic pop
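// Editor's note: the guarded block below is not part of the generated output.
// It is a minimal, illustrative round-trip sketch for the SysStatsConfig
// message implemented above, using only the methods defined in this file
// (SerializeAsString / ParseFromArray / operator==). The guard macro
// PERFETTO_AMALGAMATION_USAGE_SKETCHES is hypothetical and never defined by
// Perfetto itself, so the sketch is compiled out by default.
#if defined(PERFETTO_AMALGAMATION_USAGE_SKETCHES)
namespace {
// Serializes a default-constructed config and parses the bytes back,
// verifying that the reparsed message compares equal to the original.
inline bool SysStatsConfigRoundTripSketch() {
  perfetto::protos::gen::SysStatsConfig original;
  std::string wire = original.SerializeAsString();
  perfetto::protos::gen::SysStatsConfig reparsed;
  return reparsed.ParseFromArray(wire.data(), wire.size()) &&
         reparsed == original;
}
}  // namespace
#endif  // PERFETTO_AMALGAMATION_USAGE_SKETCHES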
// gen_amalgamated begin source: gen/protos/perfetto/config/track_event/track_event_config.gen.cc
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"

namespace perfetto {
namespace protos {
namespace gen {

TrackEventConfig::TrackEventConfig() = default;
TrackEventConfig::~TrackEventConfig() = default;
TrackEventConfig::TrackEventConfig(const TrackEventConfig&) = default;
TrackEventConfig& TrackEventConfig::operator=(const TrackEventConfig&) = default;
TrackEventConfig::TrackEventConfig(TrackEventConfig&&) noexcept = default;
TrackEventConfig& TrackEventConfig::operator=(TrackEventConfig&&) = default;

bool TrackEventConfig::operator==(const TrackEventConfig& other) const {
  return unknown_fields_ == other.unknown_fields_
   && disabled_categories_ == other.disabled_categories_
   && enabled_categories_ == other.enabled_categories_
   && disabled_tags_ == other.disabled_tags_
   && enabled_tags_ == other.enabled_tags_;
}

bool TrackEventConfig::ParseFromArray(const void* raw, size_t size) {
  disabled_categories_.clear();
  enabled_categories_.clear();
  disabled_tags_.clear();
  enabled_tags_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* disabled_categories */:
        disabled_categories_.emplace_back();
        field.get(&disabled_categories_.back());
        break;
      case 2 /* enabled_categories */:
        enabled_categories_.emplace_back();
        field.get(&enabled_categories_.back());
        break;
      case 3 /* disabled_tags */:
        disabled_tags_.emplace_back();
        field.get(&disabled_tags_.back());
        break;
      case 4 /* enabled_tags */:
        enabled_tags_.emplace_back();
        field.get(&enabled_tags_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string TrackEventConfig::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> TrackEventConfig::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void TrackEventConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: disabled_categories
  for (auto& it : disabled_categories_) {
    msg->AppendString(1, it);
  }

  // Field 2: enabled_categories
  for (auto& it : enabled_categories_) {
    msg->AppendString(2, it);
  }

  // Field 3: disabled_tags
  for (auto& it : disabled_tags_) {
    msg->AppendString(3, it);
  }

  // Field 4: enabled_tags
  for (auto& it : enabled_tags_) {
    msg->AppendString(4, it);
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}

} // namespace perfetto
} // namespace protos
} // namespace gen
#pragma GCC diagnostic pop
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/chrome_config.gen.cc
// gen_amalgamated begin header: gen/protos/perfetto/config/chrome/chrome_config.gen.h
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_CHROME_CHROME_CONFIG_PROTO_CPP_H_
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_CHROME_CHROME_CONFIG_PROTO_CPP_H_

#include <stdint.h>
#include <bitset>
#include <vector>
#include <string>
#include <type_traits>

// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
// gen_amalgamated expanded: #include "perfetto/base/export.h"

namespace perfetto {
namespace protos {
namespace gen {
class ChromeConfig;
} // namespace perfetto
} // namespace protos
} // namespace gen

namespace protozero {
class Message;
} // namespace protozero

namespace perfetto {
namespace protos {
namespace gen {

class PERFETTO_EXPORT ChromeConfig : public ::protozero::CppMessageObj {
 public:
  enum FieldNumbers {
    kTraceConfigFieldNumber = 1,
    kPrivacyFilteringEnabledFieldNumber = 2,
    kConvertToLegacyJsonFieldNumber = 3,
  };

  ChromeConfig();
  ~ChromeConfig() override;
  ChromeConfig(ChromeConfig&&) noexcept;
  ChromeConfig& operator=(ChromeConfig&&);
  ChromeConfig(const ChromeConfig&);
  ChromeConfig& operator=(const ChromeConfig&);
  bool operator==(const ChromeConfig&) const;
  bool operator!=(const ChromeConfig& other) const { return !(*this == other); }

  bool ParseFromArray(const void*, size_t) override;
  std::string SerializeAsString() const override;
  std::vector<uint8_t> SerializeAsArray() const override;
  void Serialize(::protozero::Message*) const;

  bool has_trace_config() const { return _has_field_[1]; }
  const std::string& trace_config() const { return trace_config_; }
  void set_trace_config(const std::string& value) { trace_config_ = value; _has_field_.set(1); }

  bool has_privacy_filtering_enabled() const { return _has_field_[2]; }
  bool privacy_filtering_enabled() const { return privacy_filtering_enabled_; }
  void set_privacy_filtering_enabled(bool value) { privacy_filtering_enabled_ = value; _has_field_.set(2); }

  bool has_convert_to_legacy_json() const { return _has_field_[3]; }
  bool convert_to_legacy_json() const { return convert_to_legacy_json_; }
  void set_convert_to_legacy_json(bool value) { convert_to_legacy_json_ = value; _has_field_.set(3); }

 private:
  std::string trace_config_{};
  bool privacy_filtering_enabled_{};
  bool convert_to_legacy_json_{};

  // Allows to preserve unknown protobuf fields for compatibility
  // with future versions of .proto files.
  std::string unknown_fields_;

  std::bitset<4> _has_field_{};
};

} // namespace perfetto
} // namespace protos
} // namespace gen

#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_CHROME_CHROME_CONFIG_PROTO_CPP_H_
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"

namespace perfetto {
namespace protos {
namespace gen {

ChromeConfig::ChromeConfig() = default;
ChromeConfig::~ChromeConfig() = default;
ChromeConfig::ChromeConfig(const ChromeConfig&) = default;
ChromeConfig& ChromeConfig::operator=(const ChromeConfig&) = default;
ChromeConfig::ChromeConfig(ChromeConfig&&) noexcept = default;
ChromeConfig& ChromeConfig::operator=(ChromeConfig&&) = default;

bool ChromeConfig::operator==(const ChromeConfig& other) const {
  return unknown_fields_ == other.unknown_fields_
   && trace_config_ == other.trace_config_
   && privacy_filtering_enabled_ == other.privacy_filtering_enabled_
   && convert_to_legacy_json_ == other.convert_to_legacy_json_;
}

bool ChromeConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* trace_config */:
        field.get(&trace_config_);
        break;
      case 2 /* privacy_filtering_enabled */:
        field.get(&privacy_filtering_enabled_);
        break;
      case 3 /* convert_to_legacy_json */:
        field.get(&convert_to_legacy_json_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string ChromeConfig::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> ChromeConfig::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void ChromeConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: trace_config
  if (_has_field_[1]) {
    msg->AppendString(1, trace_config_);
  }

  // Field 2: privacy_filtering_enabled
  if (_has_field_[2]) {
    msg->AppendTinyVarInt(2, privacy_filtering_enabled_);
  }

  // Field 3: convert_to_legacy_json
  if (_has_field_[3]) {
    msg->AppendTinyVarInt(3, convert_to_legacy_json_);
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}

} // namespace perfetto
} // namespace protos
} // namespace gen
#pragma GCC diagnostic pop
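// Editor's note: the guarded block below is not part of the generated output.
// It is a short, illustrative sketch of how a ChromeConfig can be populated
// via the setters declared in the header above and re-parsed from its own
// wire format. The JSON payload string and the guard macro
// PERFETTO_AMALGAMATION_USAGE_SKETCHES are hypothetical; the sketch is
// compiled out by default.
#if defined(PERFETTO_AMALGAMATION_USAGE_SKETCHES)
namespace {
// Fills a ChromeConfig, serializes it to bytes, parses it into a second
// instance and checks that the interesting fields survived the round trip.
inline bool ChromeConfigSketch() {
  perfetto::protos::gen::ChromeConfig cfg;
  cfg.set_trace_config("{\"record_mode\":\"record-until-full\"}");
  cfg.set_privacy_filtering_enabled(true);
  std::vector<uint8_t> wire = cfg.SerializeAsArray();
  perfetto::protos::gen::ChromeConfig parsed;
  return parsed.ParseFromArray(wire.data(), wire.size()) &&
         parsed.has_privacy_filtering_enabled() &&
         parsed.trace_config() == cfg.trace_config();
}
}  // namespace
#endif  // PERFETTO_AMALGAMATION_USAGE_SKETCHES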
// gen_amalgamated begin source: gen/protos/perfetto/config/data_source_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/test_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TEST_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TEST_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class TestConfig;
|
|
class TestConfig_DummyFields;
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT TestConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
using DummyFields = TestConfig_DummyFields;
|
|
enum FieldNumbers {
|
|
kMessageCountFieldNumber = 1,
|
|
kMaxMessagesPerSecondFieldNumber = 2,
|
|
kSeedFieldNumber = 3,
|
|
kMessageSizeFieldNumber = 4,
|
|
kSendBatchOnRegisterFieldNumber = 5,
|
|
kDummyFieldsFieldNumber = 6,
|
|
};
|
|
|
|
TestConfig();
|
|
~TestConfig() override;
|
|
TestConfig(TestConfig&&) noexcept;
|
|
TestConfig& operator=(TestConfig&&);
|
|
TestConfig(const TestConfig&);
|
|
TestConfig& operator=(const TestConfig&);
|
|
bool operator==(const TestConfig&) const;
|
|
bool operator!=(const TestConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_message_count() const { return _has_field_[1]; }
|
|
uint32_t message_count() const { return message_count_; }
|
|
void set_message_count(uint32_t value) { message_count_ = value; _has_field_.set(1); }
|
|
|
|
bool has_max_messages_per_second() const { return _has_field_[2]; }
|
|
uint32_t max_messages_per_second() const { return max_messages_per_second_; }
|
|
void set_max_messages_per_second(uint32_t value) { max_messages_per_second_ = value; _has_field_.set(2); }
|
|
|
|
bool has_seed() const { return _has_field_[3]; }
|
|
uint32_t seed() const { return seed_; }
|
|
void set_seed(uint32_t value) { seed_ = value; _has_field_.set(3); }
|
|
|
|
bool has_message_size() const { return _has_field_[4]; }
|
|
uint32_t message_size() const { return message_size_; }
|
|
void set_message_size(uint32_t value) { message_size_ = value; _has_field_.set(4); }
|
|
|
|
bool has_send_batch_on_register() const { return _has_field_[5]; }
|
|
bool send_batch_on_register() const { return send_batch_on_register_; }
|
|
void set_send_batch_on_register(bool value) { send_batch_on_register_ = value; _has_field_.set(5); }
|
|
|
|
bool has_dummy_fields() const { return _has_field_[6]; }
|
|
const TestConfig_DummyFields& dummy_fields() const { return *dummy_fields_; }
|
|
TestConfig_DummyFields* mutable_dummy_fields() { _has_field_.set(6); return dummy_fields_.get(); }
|
|
|
|
private:
|
|
uint32_t message_count_{};
|
|
uint32_t max_messages_per_second_{};
|
|
uint32_t seed_{};
|
|
uint32_t message_size_{};
|
|
bool send_batch_on_register_{};
|
|
::protozero::CopyablePtr<TestConfig_DummyFields> dummy_fields_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<7> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT TestConfig_DummyFields : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kFieldUint32FieldNumber = 1,
|
|
kFieldInt32FieldNumber = 2,
|
|
kFieldUint64FieldNumber = 3,
|
|
kFieldInt64FieldNumber = 4,
|
|
kFieldFixed64FieldNumber = 5,
|
|
kFieldSfixed64FieldNumber = 6,
|
|
kFieldFixed32FieldNumber = 7,
|
|
kFieldSfixed32FieldNumber = 8,
|
|
kFieldDoubleFieldNumber = 9,
|
|
kFieldFloatFieldNumber = 10,
|
|
kFieldSint64FieldNumber = 11,
|
|
kFieldSint32FieldNumber = 12,
|
|
kFieldStringFieldNumber = 13,
|
|
kFieldBytesFieldNumber = 14,
|
|
};
|
|
|
|
TestConfig_DummyFields();
|
|
~TestConfig_DummyFields() override;
|
|
TestConfig_DummyFields(TestConfig_DummyFields&&) noexcept;
|
|
TestConfig_DummyFields& operator=(TestConfig_DummyFields&&);
|
|
TestConfig_DummyFields(const TestConfig_DummyFields&);
|
|
TestConfig_DummyFields& operator=(const TestConfig_DummyFields&);
|
|
bool operator==(const TestConfig_DummyFields&) const;
|
|
bool operator!=(const TestConfig_DummyFields& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_field_uint32() const { return _has_field_[1]; }
|
|
uint32_t field_uint32() const { return field_uint32_; }
|
|
void set_field_uint32(uint32_t value) { field_uint32_ = value; _has_field_.set(1); }
|
|
|
|
bool has_field_int32() const { return _has_field_[2]; }
|
|
int32_t field_int32() const { return field_int32_; }
|
|
void set_field_int32(int32_t value) { field_int32_ = value; _has_field_.set(2); }
|
|
|
|
bool has_field_uint64() const { return _has_field_[3]; }
|
|
uint64_t field_uint64() const { return field_uint64_; }
|
|
void set_field_uint64(uint64_t value) { field_uint64_ = value; _has_field_.set(3); }
|
|
|
|
bool has_field_int64() const { return _has_field_[4]; }
|
|
int64_t field_int64() const { return field_int64_; }
|
|
void set_field_int64(int64_t value) { field_int64_ = value; _has_field_.set(4); }
|
|
|
|
bool has_field_fixed64() const { return _has_field_[5]; }
|
|
uint64_t field_fixed64() const { return field_fixed64_; }
|
|
void set_field_fixed64(uint64_t value) { field_fixed64_ = value; _has_field_.set(5); }
|
|
|
|
bool has_field_sfixed64() const { return _has_field_[6]; }
|
|
int64_t field_sfixed64() const { return field_sfixed64_; }
|
|
void set_field_sfixed64(int64_t value) { field_sfixed64_ = value; _has_field_.set(6); }
|
|
|
|
bool has_field_fixed32() const { return _has_field_[7]; }
|
|
uint32_t field_fixed32() const { return field_fixed32_; }
|
|
void set_field_fixed32(uint32_t value) { field_fixed32_ = value; _has_field_.set(7); }
|
|
|
|
bool has_field_sfixed32() const { return _has_field_[8]; }
|
|
int32_t field_sfixed32() const { return field_sfixed32_; }
|
|
void set_field_sfixed32(int32_t value) { field_sfixed32_ = value; _has_field_.set(8); }
|
|
|
|
bool has_field_double() const { return _has_field_[9]; }
|
|
double field_double() const { return field_double_; }
|
|
void set_field_double(double value) { field_double_ = value; _has_field_.set(9); }
|
|
|
|
bool has_field_float() const { return _has_field_[10]; }
|
|
float field_float() const { return field_float_; }
|
|
void set_field_float(float value) { field_float_ = value; _has_field_.set(10); }
|
|
|
|
bool has_field_sint64() const { return _has_field_[11]; }
|
|
int64_t field_sint64() const { return field_sint64_; }
|
|
void set_field_sint64(int64_t value) { field_sint64_ = value; _has_field_.set(11); }
|
|
|
|
bool has_field_sint32() const { return _has_field_[12]; }
|
|
int32_t field_sint32() const { return field_sint32_; }
|
|
void set_field_sint32(int32_t value) { field_sint32_ = value; _has_field_.set(12); }
|
|
|
|
bool has_field_string() const { return _has_field_[13]; }
|
|
const std::string& field_string() const { return field_string_; }
|
|
void set_field_string(const std::string& value) { field_string_ = value; _has_field_.set(13); }
|
|
|
|
bool has_field_bytes() const { return _has_field_[14]; }
|
|
const std::string& field_bytes() const { return field_bytes_; }
|
|
void set_field_bytes(const std::string& value) { field_bytes_ = value; _has_field_.set(14); }
|
|
void set_field_bytes(const void* p, size_t s) { field_bytes_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(14); }
|
|
|
|
private:
|
|
uint32_t field_uint32_{};
|
|
int32_t field_int32_{};
|
|
uint64_t field_uint64_{};
|
|
int64_t field_int64_{};
|
|
uint64_t field_fixed64_{};
|
|
int64_t field_sfixed64_{};
|
|
uint32_t field_fixed32_{};
|
|
int32_t field_sfixed32_{};
|
|
double field_double_{};
|
|
float field_float_{};
|
|
int64_t field_sint64_{};
|
|
int32_t field_sint32_{};
|
|
std::string field_string_{};
|
|
std::string field_bytes_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<15> _has_field_{};
|
|
};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TEST_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
DataSourceConfig::DataSourceConfig() = default;
|
|
DataSourceConfig::~DataSourceConfig() = default;
|
|
DataSourceConfig::DataSourceConfig(const DataSourceConfig&) = default;
|
|
DataSourceConfig& DataSourceConfig::operator=(const DataSourceConfig&) = default;
|
|
DataSourceConfig::DataSourceConfig(DataSourceConfig&&) noexcept = default;
|
|
DataSourceConfig& DataSourceConfig::operator=(DataSourceConfig&&) = default;
|
|
|
|
bool DataSourceConfig::operator==(const DataSourceConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_ == other.name_
|
|
&& target_buffer_ == other.target_buffer_
|
|
&& trace_duration_ms_ == other.trace_duration_ms_
|
|
&& stop_timeout_ms_ == other.stop_timeout_ms_
|
|
&& enable_extra_guardrails_ == other.enable_extra_guardrails_
|
|
&& tracing_session_id_ == other.tracing_session_id_
|
|
&& ftrace_config_ == other.ftrace_config_
|
|
&& inode_file_config_ == other.inode_file_config_
|
|
&& process_stats_config_ == other.process_stats_config_
|
|
&& sys_stats_config_ == other.sys_stats_config_
|
|
&& heapprofd_config_ == other.heapprofd_config_
|
|
&& java_hprof_config_ == other.java_hprof_config_
|
|
&& android_power_config_ == other.android_power_config_
|
|
&& android_log_config_ == other.android_log_config_
|
|
&& gpu_counter_config_ == other.gpu_counter_config_
|
|
&& packages_list_config_ == other.packages_list_config_
|
|
&& perf_event_config_ == other.perf_event_config_
|
|
&& vulkan_memory_config_ == other.vulkan_memory_config_
|
|
&& track_event_config_ == other.track_event_config_
|
|
&& android_polled_state_config_ == other.android_polled_state_config_
|
|
&& chrome_config_ == other.chrome_config_
|
|
&& legacy_config_ == other.legacy_config_
|
|
&& for_testing_ == other.for_testing_;
|
|
}
|
|
|
|
bool DataSourceConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 2 /* target_buffer */:
|
|
field.get(&target_buffer_);
|
|
break;
|
|
case 3 /* trace_duration_ms */:
|
|
field.get(&trace_duration_ms_);
|
|
break;
|
|
case 7 /* stop_timeout_ms */:
|
|
field.get(&stop_timeout_ms_);
|
|
break;
|
|
case 6 /* enable_extra_guardrails */:
|
|
field.get(&enable_extra_guardrails_);
|
|
break;
|
|
case 4 /* tracing_session_id */:
|
|
field.get(&tracing_session_id_);
|
|
break;
|
|
case 100 /* ftrace_config */:
|
|
ftrace_config_ = field.as_std_string();
|
|
break;
|
|
case 102 /* inode_file_config */:
|
|
inode_file_config_ = field.as_std_string();
|
|
break;
|
|
case 103 /* process_stats_config */:
|
|
process_stats_config_ = field.as_std_string();
|
|
break;
|
|
case 104 /* sys_stats_config */:
|
|
sys_stats_config_ = field.as_std_string();
|
|
break;
|
|
case 105 /* heapprofd_config */:
|
|
heapprofd_config_ = field.as_std_string();
|
|
break;
|
|
case 110 /* java_hprof_config */:
|
|
java_hprof_config_ = field.as_std_string();
|
|
break;
|
|
case 106 /* android_power_config */:
|
|
android_power_config_ = field.as_std_string();
|
|
break;
|
|
case 107 /* android_log_config */:
|
|
android_log_config_ = field.as_std_string();
|
|
break;
|
|
case 108 /* gpu_counter_config */:
|
|
gpu_counter_config_ = field.as_std_string();
|
|
break;
|
|
case 109 /* packages_list_config */:
|
|
packages_list_config_ = field.as_std_string();
|
|
break;
|
|
case 111 /* perf_event_config */:
|
|
perf_event_config_ = field.as_std_string();
|
|
break;
|
|
case 112 /* vulkan_memory_config */:
|
|
vulkan_memory_config_ = field.as_std_string();
|
|
break;
|
|
case 113 /* track_event_config */:
|
|
track_event_config_ = field.as_std_string();
|
|
break;
|
|
case 114 /* android_polled_state_config */:
|
|
android_polled_state_config_ = field.as_std_string();
|
|
break;
|
|
case 101 /* chrome_config */:
|
|
(*chrome_config_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 1000 /* legacy_config */:
|
|
field.get(&legacy_config_);
|
|
break;
|
|
case 1001 /* for_testing */:
|
|
(*for_testing_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DataSourceConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DataSourceConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DataSourceConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, name_);
|
|
}
|
|
|
|
// Field 2: target_buffer
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, target_buffer_);
|
|
}
|
|
|
|
// Field 3: trace_duration_ms
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, trace_duration_ms_);
|
|
}
|
|
|
|
// Field 7: stop_timeout_ms
|
|
if (_has_field_[7]) {
|
|
msg->AppendVarInt(7, stop_timeout_ms_);
|
|
}
|
|
|
|
// Field 6: enable_extra_guardrails
|
|
if (_has_field_[6]) {
|
|
msg->AppendTinyVarInt(6, enable_extra_guardrails_);
|
|
}
|
|
|
|
// Field 4: tracing_session_id
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, tracing_session_id_);
|
|
}
|
|
|
|
// Field 100: ftrace_config
|
|
if (_has_field_[100]) {
|
|
msg->AppendString(100, ftrace_config_);
|
|
}
|
|
|
|
// Field 102: inode_file_config
|
|
if (_has_field_[102]) {
|
|
msg->AppendString(102, inode_file_config_);
|
|
}
|
|
|
|
// Field 103: process_stats_config
|
|
if (_has_field_[103]) {
|
|
msg->AppendString(103, process_stats_config_);
|
|
}
|
|
|
|
// Field 104: sys_stats_config
|
|
if (_has_field_[104]) {
|
|
msg->AppendString(104, sys_stats_config_);
|
|
}
|
|
|
|
// Field 105: heapprofd_config
|
|
if (_has_field_[105]) {
|
|
msg->AppendString(105, heapprofd_config_);
|
|
}
|
|
|
|
// Field 110: java_hprof_config
|
|
if (_has_field_[110]) {
|
|
msg->AppendString(110, java_hprof_config_);
|
|
}
|
|
|
|
// Field 106: android_power_config
|
|
if (_has_field_[106]) {
|
|
msg->AppendString(106, android_power_config_);
|
|
}
|
|
|
|
// Field 107: android_log_config
|
|
if (_has_field_[107]) {
|
|
msg->AppendString(107, android_log_config_);
|
|
}
|
|
|
|
// Field 108: gpu_counter_config
|
|
if (_has_field_[108]) {
|
|
msg->AppendString(108, gpu_counter_config_);
|
|
}
|
|
|
|
// Field 109: packages_list_config
|
|
if (_has_field_[109]) {
|
|
msg->AppendString(109, packages_list_config_);
|
|
}
|
|
|
|
// Field 111: perf_event_config
|
|
if (_has_field_[111]) {
|
|
msg->AppendString(111, perf_event_config_);
|
|
}
|
|
|
|
// Field 112: vulkan_memory_config
|
|
if (_has_field_[112]) {
|
|
msg->AppendString(112, vulkan_memory_config_);
|
|
}
|
|
|
|
// Field 113: track_event_config
|
|
if (_has_field_[113]) {
|
|
msg->AppendString(113, track_event_config_);
|
|
}
|
|
|
|
// Field 114: android_polled_state_config
|
|
if (_has_field_[114]) {
|
|
msg->AppendString(114, android_polled_state_config_);
|
|
}
|
|
|
|
// Field 101: chrome_config
|
|
if (_has_field_[101]) {
|
|
(*chrome_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(101));
|
|
}
|
|
|
|
// Field 1000: legacy_config
|
|
if (_has_field_[1000]) {
|
|
msg->AppendString(1000, legacy_config_);
|
|
}
|
|
|
|
// Field 1001: for_testing
|
|
if (_has_field_[1001]) {
|
|
(*for_testing_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1001));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/stress_test_config.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/stress_test_config.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_STRESS_TEST_CONFIG_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_STRESS_TEST_CONFIG_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class StressTestConfig;
|
|
class StressTestConfig_WriterTiming;
|
|
class TraceConfig;
|
|
class TraceConfig_IncidentReportConfig;
|
|
class TraceConfig_IncrementalStateConfig;
|
|
class TraceConfig_TriggerConfig;
|
|
class TraceConfig_TriggerConfig_Trigger;
|
|
class TraceConfig_GuardrailOverrides;
|
|
class TraceConfig_StatsdMetadata;
|
|
class TraceConfig_ProducerConfig;
|
|
class TraceConfig_BuiltinDataSource;
|
|
class TraceConfig_DataSource;
|
|
class DataSourceConfig;
|
|
class TestConfig;
|
|
class TestConfig_DummyFields;
|
|
class ChromeConfig;
|
|
class TraceConfig_BufferConfig;
|
|
enum TraceConfig_LockdownModeOperation : int;
|
|
enum TraceConfig_CompressionType : int;
|
|
enum TraceConfig_TriggerConfig_TriggerMode : int;
|
|
enum BuiltinClock : int;
|
|
enum TraceConfig_BufferConfig_FillPolicy : int;
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT StressTestConfig : public ::protozero::CppMessageObj {
|
|
public:
|
|
using WriterTiming = StressTestConfig_WriterTiming;
|
|
enum FieldNumbers {
|
|
kTraceConfigFieldNumber = 1,
|
|
kShmemSizeKbFieldNumber = 2,
|
|
kShmemPageSizeKbFieldNumber = 3,
|
|
kNumProcessesFieldNumber = 4,
|
|
kNumThreadsFieldNumber = 5,
|
|
kMaxEventsFieldNumber = 6,
|
|
kNestingFieldNumber = 7,
|
|
kSteadyStateTimingsFieldNumber = 8,
|
|
kBurstPeriodMsFieldNumber = 9,
|
|
kBurstDurationMsFieldNumber = 10,
|
|
kBurstTimingsFieldNumber = 11,
|
|
};
|
|
|
|
StressTestConfig();
|
|
~StressTestConfig() override;
|
|
StressTestConfig(StressTestConfig&&) noexcept;
|
|
StressTestConfig& operator=(StressTestConfig&&);
|
|
StressTestConfig(const StressTestConfig&);
|
|
StressTestConfig& operator=(const StressTestConfig&);
|
|
bool operator==(const StressTestConfig&) const;
|
|
bool operator!=(const StressTestConfig& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_trace_config() const { return _has_field_[1]; }
|
|
const TraceConfig& trace_config() const { return *trace_config_; }
|
|
TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }
|
|
|
|
bool has_shmem_size_kb() const { return _has_field_[2]; }
|
|
uint32_t shmem_size_kb() const { return shmem_size_kb_; }
|
|
void set_shmem_size_kb(uint32_t value) { shmem_size_kb_ = value; _has_field_.set(2); }
|
|
|
|
bool has_shmem_page_size_kb() const { return _has_field_[3]; }
|
|
uint32_t shmem_page_size_kb() const { return shmem_page_size_kb_; }
|
|
void set_shmem_page_size_kb(uint32_t value) { shmem_page_size_kb_ = value; _has_field_.set(3); }
|
|
|
|
bool has_num_processes() const { return _has_field_[4]; }
|
|
uint32_t num_processes() const { return num_processes_; }
|
|
void set_num_processes(uint32_t value) { num_processes_ = value; _has_field_.set(4); }
|
|
|
|
bool has_num_threads() const { return _has_field_[5]; }
|
|
uint32_t num_threads() const { return num_threads_; }
|
|
void set_num_threads(uint32_t value) { num_threads_ = value; _has_field_.set(5); }
|
|
|
|
bool has_max_events() const { return _has_field_[6]; }
|
|
uint32_t max_events() const { return max_events_; }
|
|
void set_max_events(uint32_t value) { max_events_ = value; _has_field_.set(6); }
|
|
|
|
bool has_nesting() const { return _has_field_[7]; }
|
|
uint32_t nesting() const { return nesting_; }
|
|
void set_nesting(uint32_t value) { nesting_ = value; _has_field_.set(7); }
|
|
|
|
bool has_steady_state_timings() const { return _has_field_[8]; }
|
|
const StressTestConfig_WriterTiming& steady_state_timings() const { return *steady_state_timings_; }
|
|
StressTestConfig_WriterTiming* mutable_steady_state_timings() { _has_field_.set(8); return steady_state_timings_.get(); }
|
|
|
|
bool has_burst_period_ms() const { return _has_field_[9]; }
|
|
uint32_t burst_period_ms() const { return burst_period_ms_; }
|
|
void set_burst_period_ms(uint32_t value) { burst_period_ms_ = value; _has_field_.set(9); }
|
|
|
|
bool has_burst_duration_ms() const { return _has_field_[10]; }
|
|
uint32_t burst_duration_ms() const { return burst_duration_ms_; }
|
|
void set_burst_duration_ms(uint32_t value) { burst_duration_ms_ = value; _has_field_.set(10); }
|
|
|
|
bool has_burst_timings() const { return _has_field_[11]; }
|
|
const StressTestConfig_WriterTiming& burst_timings() const { return *burst_timings_; }
|
|
StressTestConfig_WriterTiming* mutable_burst_timings() { _has_field_.set(11); return burst_timings_.get(); }
|
|
|
|
private:
|
|
::protozero::CopyablePtr<TraceConfig> trace_config_;
|
|
uint32_t shmem_size_kb_{};
|
|
uint32_t shmem_page_size_kb_{};
|
|
uint32_t num_processes_{};
|
|
uint32_t num_threads_{};
|
|
uint32_t max_events_{};
|
|
uint32_t nesting_{};
|
|
::protozero::CopyablePtr<StressTestConfig_WriterTiming> steady_state_timings_;
|
|
uint32_t burst_period_ms_{};
|
|
uint32_t burst_duration_ms_{};
|
|
::protozero::CopyablePtr<StressTestConfig_WriterTiming> burst_timings_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<12> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT StressTestConfig_WriterTiming : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kPayloadMeanFieldNumber = 1,
|
|
kPayloadStddevFieldNumber = 2,
|
|
kRateMeanFieldNumber = 3,
|
|
kRateStddevFieldNumber = 4,
|
|
kPayloadWriteTimeMsFieldNumber = 5,
|
|
};
|
|
|
|
StressTestConfig_WriterTiming();
|
|
~StressTestConfig_WriterTiming() override;
|
|
StressTestConfig_WriterTiming(StressTestConfig_WriterTiming&&) noexcept;
|
|
StressTestConfig_WriterTiming& operator=(StressTestConfig_WriterTiming&&);
|
|
StressTestConfig_WriterTiming(const StressTestConfig_WriterTiming&);
|
|
StressTestConfig_WriterTiming& operator=(const StressTestConfig_WriterTiming&);
|
|
bool operator==(const StressTestConfig_WriterTiming&) const;
|
|
bool operator!=(const StressTestConfig_WriterTiming& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_payload_mean() const { return _has_field_[1]; }
|
|
double payload_mean() const { return payload_mean_; }
|
|
void set_payload_mean(double value) { payload_mean_ = value; _has_field_.set(1); }
|
|
|
|
bool has_payload_stddev() const { return _has_field_[2]; }
|
|
double payload_stddev() const { return payload_stddev_; }
|
|
void set_payload_stddev(double value) { payload_stddev_ = value; _has_field_.set(2); }
|
|
|
|
bool has_rate_mean() const { return _has_field_[3]; }
|
|
double rate_mean() const { return rate_mean_; }
|
|
void set_rate_mean(double value) { rate_mean_ = value; _has_field_.set(3); }
|
|
|
|
bool has_rate_stddev() const { return _has_field_[4]; }
|
|
double rate_stddev() const { return rate_stddev_; }
|
|
void set_rate_stddev(double value) { rate_stddev_ = value; _has_field_.set(4); }
|
|
|
|
bool has_payload_write_time_ms() const { return _has_field_[5]; }
|
|
uint32_t payload_write_time_ms() const { return payload_write_time_ms_; }
|
|
void set_payload_write_time_ms(uint32_t value) { payload_write_time_ms_ = value; _has_field_.set(5); }
|
|
|
|
private:
|
|
double payload_mean_{};
|
|
double payload_stddev_{};
|
|
double rate_mean_{};
|
|
double rate_stddev_{};
|
|
uint32_t payload_write_time_ms_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<6> _has_field_{};
|
|
};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_STRESS_TEST_CONFIG_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/stress_test_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
StressTestConfig::StressTestConfig() = default;
|
|
StressTestConfig::~StressTestConfig() = default;
|
|
StressTestConfig::StressTestConfig(const StressTestConfig&) = default;
|
|
StressTestConfig& StressTestConfig::operator=(const StressTestConfig&) = default;
|
|
StressTestConfig::StressTestConfig(StressTestConfig&&) noexcept = default;
|
|
StressTestConfig& StressTestConfig::operator=(StressTestConfig&&) = default;
|
|
|
|
bool StressTestConfig::operator==(const StressTestConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& trace_config_ == other.trace_config_
|
|
&& shmem_size_kb_ == other.shmem_size_kb_
|
|
&& shmem_page_size_kb_ == other.shmem_page_size_kb_
|
|
&& num_processes_ == other.num_processes_
|
|
&& num_threads_ == other.num_threads_
|
|
&& max_events_ == other.max_events_
|
|
&& nesting_ == other.nesting_
|
|
&& steady_state_timings_ == other.steady_state_timings_
|
|
&& burst_period_ms_ == other.burst_period_ms_
|
|
&& burst_duration_ms_ == other.burst_duration_ms_
|
|
&& burst_timings_ == other.burst_timings_;
|
|
}
|
|
|
|
bool StressTestConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_config */:
|
|
(*trace_config_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 2 /* shmem_size_kb */:
|
|
field.get(&shmem_size_kb_);
|
|
break;
|
|
case 3 /* shmem_page_size_kb */:
|
|
field.get(&shmem_page_size_kb_);
|
|
break;
|
|
case 4 /* num_processes */:
|
|
field.get(&num_processes_);
|
|
break;
|
|
case 5 /* num_threads */:
|
|
field.get(&num_threads_);
|
|
break;
|
|
case 6 /* max_events */:
|
|
field.get(&max_events_);
|
|
break;
|
|
case 7 /* nesting */:
|
|
field.get(&nesting_);
|
|
break;
|
|
case 8 /* steady_state_timings */:
|
|
(*steady_state_timings_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 9 /* burst_period_ms */:
|
|
field.get(&burst_period_ms_);
|
|
break;
|
|
case 10 /* burst_duration_ms */:
|
|
field.get(&burst_duration_ms_);
|
|
break;
|
|
case 11 /* burst_timings */:
|
|
(*burst_timings_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string StressTestConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> StressTestConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void StressTestConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_config
|
|
if (_has_field_[1]) {
|
|
(*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: shmem_size_kb
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, shmem_size_kb_);
|
|
}
|
|
|
|
// Field 3: shmem_page_size_kb
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, shmem_page_size_kb_);
|
|
}
|
|
|
|
// Field 4: num_processes
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, num_processes_);
|
|
}
|
|
|
|
// Field 5: num_threads
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, num_threads_);
|
|
}
|
|
|
|
// Field 6: max_events
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, max_events_);
|
|
}
|
|
|
|
// Field 7: nesting
|
|
if (_has_field_[7]) {
|
|
msg->AppendVarInt(7, nesting_);
|
|
}
|
|
|
|
// Field 8: steady_state_timings
|
|
if (_has_field_[8]) {
|
|
(*steady_state_timings_).Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
|
|
}
|
|
|
|
// Field 9: burst_period_ms
|
|
if (_has_field_[9]) {
|
|
msg->AppendVarInt(9, burst_period_ms_);
|
|
}
|
|
|
|
// Field 10: burst_duration_ms
|
|
if (_has_field_[10]) {
|
|
msg->AppendVarInt(10, burst_duration_ms_);
|
|
}
|
|
|
|
// Field 11: burst_timings
|
|
if (_has_field_[11]) {
|
|
(*burst_timings_).Serialize(msg->BeginNestedMessage<::protozero::Message>(11));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
StressTestConfig_WriterTiming::StressTestConfig_WriterTiming() = default;
|
|
StressTestConfig_WriterTiming::~StressTestConfig_WriterTiming() = default;
|
|
StressTestConfig_WriterTiming::StressTestConfig_WriterTiming(const StressTestConfig_WriterTiming&) = default;
|
|
StressTestConfig_WriterTiming& StressTestConfig_WriterTiming::operator=(const StressTestConfig_WriterTiming&) = default;
|
|
StressTestConfig_WriterTiming::StressTestConfig_WriterTiming(StressTestConfig_WriterTiming&&) noexcept = default;
|
|
StressTestConfig_WriterTiming& StressTestConfig_WriterTiming::operator=(StressTestConfig_WriterTiming&&) = default;
|
|
|
|
bool StressTestConfig_WriterTiming::operator==(const StressTestConfig_WriterTiming& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& payload_mean_ == other.payload_mean_
|
|
&& payload_stddev_ == other.payload_stddev_
|
|
&& rate_mean_ == other.rate_mean_
|
|
&& rate_stddev_ == other.rate_stddev_
|
|
&& payload_write_time_ms_ == other.payload_write_time_ms_;
|
|
}
|
|
|
|
bool StressTestConfig_WriterTiming::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* payload_mean */:
|
|
field.get(&payload_mean_);
|
|
break;
|
|
case 2 /* payload_stddev */:
|
|
field.get(&payload_stddev_);
|
|
break;
|
|
case 3 /* rate_mean */:
|
|
field.get(&rate_mean_);
|
|
break;
|
|
case 4 /* rate_stddev */:
|
|
field.get(&rate_stddev_);
|
|
break;
|
|
case 5 /* payload_write_time_ms */:
|
|
field.get(&payload_write_time_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string StressTestConfig_WriterTiming::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> StressTestConfig_WriterTiming::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void StressTestConfig_WriterTiming::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: payload_mean
|
|
if (_has_field_[1]) {
|
|
msg->AppendFixed(1, payload_mean_);
|
|
}
|
|
|
|
// Field 2: payload_stddev
|
|
if (_has_field_[2]) {
|
|
msg->AppendFixed(2, payload_stddev_);
|
|
}
|
|
|
|
// Field 3: rate_mean
|
|
if (_has_field_[3]) {
|
|
msg->AppendFixed(3, rate_mean_);
|
|
}
|
|
|
|
// Field 4: rate_stddev
|
|
if (_has_field_[4]) {
|
|
msg->AppendFixed(4, rate_stddev_);
|
|
}
|
|
|
|
// Field 5: payload_write_time_ms
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, payload_write_time_ms_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#pragma GCC diagnostic pop
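// Editor's note: the guarded block below is not part of the generated output.
// It is a minimal sketch of configuring the StressTestConfig message defined
// earlier in this file, using only accessors declared in its generated header
// (set_num_processes, set_num_threads, mutable_steady_state_timings, ...).
// The guard macro PERFETTO_AMALGAMATION_USAGE_SKETCHES is hypothetical; the
// sketch is compiled out by default.
#if defined(PERFETTO_AMALGAMATION_USAGE_SKETCHES)
namespace {
// Builds a small stress-test configuration and round-trips it through the
// generated serializer and parser.
inline bool StressTestConfigSketch() {
  perfetto::protos::gen::StressTestConfig cfg;
  cfg.set_num_processes(2);
  cfg.set_num_threads(4);
  cfg.mutable_steady_state_timings()->set_rate_mean(1000.0);    // writes/sec
  cfg.mutable_steady_state_timings()->set_payload_mean(128.0);  // bytes
  std::string wire = cfg.SerializeAsString();
  perfetto::protos::gen::StressTestConfig parsed;
  return parsed.ParseFromArray(wire.data(), wire.size()) &&
         parsed.has_steady_state_timings() && parsed.num_threads() == 4;
}
}  // namespace
#endif  // PERFETTO_AMALGAMATION_USAGE_SKETCHES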
// gen_amalgamated begin source: gen/protos/perfetto/config/test_config.gen.cc
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"

namespace perfetto {
namespace protos {
namespace gen {

TestConfig::TestConfig() = default;
TestConfig::~TestConfig() = default;
TestConfig::TestConfig(const TestConfig&) = default;
TestConfig& TestConfig::operator=(const TestConfig&) = default;
TestConfig::TestConfig(TestConfig&&) noexcept = default;
TestConfig& TestConfig::operator=(TestConfig&&) = default;

bool TestConfig::operator==(const TestConfig& other) const {
  return unknown_fields_ == other.unknown_fields_
   && message_count_ == other.message_count_
   && max_messages_per_second_ == other.max_messages_per_second_
   && seed_ == other.seed_
   && message_size_ == other.message_size_
   && send_batch_on_register_ == other.send_batch_on_register_
   && dummy_fields_ == other.dummy_fields_;
}

bool TestConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* message_count */:
        field.get(&message_count_);
        break;
      case 2 /* max_messages_per_second */:
        field.get(&max_messages_per_second_);
        break;
      case 3 /* seed */:
        field.get(&seed_);
        break;
      case 4 /* message_size */:
        field.get(&message_size_);
        break;
      case 5 /* send_batch_on_register */:
        field.get(&send_batch_on_register_);
        break;
      case 6 /* dummy_fields */:
        (*dummy_fields_).ParseFromString(field.as_std_string());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string TestConfig::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> TestConfig::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void TestConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: message_count
  if (_has_field_[1]) {
    msg->AppendVarInt(1, message_count_);
  }

  // Field 2: max_messages_per_second
  if (_has_field_[2]) {
    msg->AppendVarInt(2, max_messages_per_second_);
  }

  // Field 3: seed
  if (_has_field_[3]) {
    msg->AppendVarInt(3, seed_);
  }

  // Field 4: message_size
  if (_has_field_[4]) {
    msg->AppendVarInt(4, message_size_);
  }

  // Field 5: send_batch_on_register
  if (_has_field_[5]) {
    msg->AppendTinyVarInt(5, send_batch_on_register_);
  }

  // Field 6: dummy_fields
  if (_has_field_[6]) {
    (*dummy_fields_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}


TestConfig_DummyFields::TestConfig_DummyFields() = default;
TestConfig_DummyFields::~TestConfig_DummyFields() = default;
TestConfig_DummyFields::TestConfig_DummyFields(const TestConfig_DummyFields&) = default;
TestConfig_DummyFields& TestConfig_DummyFields::operator=(const TestConfig_DummyFields&) = default;
TestConfig_DummyFields::TestConfig_DummyFields(TestConfig_DummyFields&&) noexcept = default;
TestConfig_DummyFields& TestConfig_DummyFields::operator=(TestConfig_DummyFields&&) = default;

bool TestConfig_DummyFields::operator==(const TestConfig_DummyFields& other) const {
  return unknown_fields_ == other.unknown_fields_
   && field_uint32_ == other.field_uint32_
   && field_int32_ == other.field_int32_
   && field_uint64_ == other.field_uint64_
   && field_int64_ == other.field_int64_
   && field_fixed64_ == other.field_fixed64_
   && field_sfixed64_ == other.field_sfixed64_
   && field_fixed32_ == other.field_fixed32_
   && field_sfixed32_ == other.field_sfixed32_
   && field_double_ == other.field_double_
   && field_float_ == other.field_float_
   && field_sint64_ == other.field_sint64_
   && field_sint32_ == other.field_sint32_
   && field_string_ == other.field_string_
   && field_bytes_ == other.field_bytes_;
}

bool TestConfig_DummyFields::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* field_uint32 */:
        field.get(&field_uint32_);
        break;
      case 2 /* field_int32 */:
        field.get(&field_int32_);
        break;
      case 3 /* field_uint64 */:
        field.get(&field_uint64_);
        break;
      case 4 /* field_int64 */:
        field.get(&field_int64_);
        break;
      case 5 /* field_fixed64 */:
        field.get(&field_fixed64_);
        break;
      case 6 /* field_sfixed64 */:
        field.get(&field_sfixed64_);
        break;
      case 7 /* field_fixed32 */:
        field.get(&field_fixed32_);
        break;
      case 8 /* field_sfixed32 */:
        field.get(&field_sfixed32_);
        break;
      case 9 /* field_double */:
        field.get(&field_double_);
        break;
      case 10 /* field_float */:
        field.get(&field_float_);
        break;
      case 11 /* field_sint64 */:
        field.get_signed(&field_sint64_);
        break;
      case 12 /* field_sint32 */:
        field.get_signed(&field_sint32_);
        break;
      case 13 /* field_string */:
        field.get(&field_string_);
        break;
      case 14 /* field_bytes */:
        field.get(&field_bytes_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string TestConfig_DummyFields::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> TestConfig_DummyFields::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void TestConfig_DummyFields::Serialize(::protozero::Message* msg) const {
  // Field 1: field_uint32
  if (_has_field_[1]) {
    msg->AppendVarInt(1, field_uint32_);
  }

  // Field 2: field_int32
  if (_has_field_[2]) {
    msg->AppendVarInt(2, field_int32_);
  }

  // Field 3: field_uint64
  if (_has_field_[3]) {
    msg->AppendVarInt(3, field_uint64_);
  }

  // Field 4: field_int64
  if (_has_field_[4]) {
    msg->AppendVarInt(4, field_int64_);
  }

  // Field 5: field_fixed64
  if (_has_field_[5]) {
    msg->AppendFixed(5, field_fixed64_);
  }

  // Field 6: field_sfixed64
  if (_has_field_[6]) {
    msg->AppendFixed(6, field_sfixed64_);
  }

  // Field 7: field_fixed32
  if (_has_field_[7]) {
    msg->AppendFixed(7, field_fixed32_);
  }

  // Field 8: field_sfixed32
  if (_has_field_[8]) {
    msg->AppendFixed(8, field_sfixed32_);
  }

  // Field 9: field_double
  if (_has_field_[9]) {
    msg->AppendFixed(9, field_double_);
  }

  // Field 10: field_float
  if (_has_field_[10]) {
    msg->AppendFixed(10, field_float_);
  }

  // Field 11: field_sint64
  if (_has_field_[11]) {
    msg->AppendSignedVarInt(11, field_sint64_);
  }

  // Field 12: field_sint32
  if (_has_field_[12]) {
    msg->AppendSignedVarInt(12, field_sint32_);
  }

  // Field 13: field_string
  if (_has_field_[13]) {
    msg->AppendString(13, field_string_);
  }

  // Field 14: field_bytes
  if (_has_field_[14]) {
    msg->AppendString(14, field_bytes_);
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}

} // namespace perfetto
} // namespace protos
} // namespace gen
#pragma GCC diagnostic pop
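// Editor's note: the guarded block below is not part of the generated output.
// It is a minimal sketch showing how the nested TestConfig.DummyFields message
// is reached through mutable_dummy_fields() and survives a serialize/parse
// round trip. Only accessors declared in the generated test_config header are
// used. The guard macro PERFETTO_AMALGAMATION_USAGE_SKETCHES is hypothetical;
// the sketch is compiled out by default.
#if defined(PERFETTO_AMALGAMATION_USAGE_SKETCHES)
namespace {
// Sets a couple of scalar fields and one nested string field, then checks
// that the parsed copy reports the same nested payload.
inline bool TestConfigSketch() {
  perfetto::protos::gen::TestConfig cfg;
  cfg.set_message_count(10);
  cfg.set_message_size(64);
  cfg.mutable_dummy_fields()->set_field_string("payload");
  std::vector<uint8_t> wire = cfg.SerializeAsArray();
  perfetto::protos::gen::TestConfig parsed;
  return parsed.ParseFromArray(wire.data(), wire.size()) &&
         parsed.has_dummy_fields() &&
         parsed.dummy_fields().field_string() == "payload";
}
}  // namespace
#endif  // PERFETTO_AMALGAMATION_USAGE_SKETCHES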
// gen_amalgamated begin source: gen/protos/perfetto/config/trace_config.gen.cc
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"

namespace perfetto {
namespace protos {
namespace gen {

TraceConfig::TraceConfig() = default;
TraceConfig::~TraceConfig() = default;
TraceConfig::TraceConfig(const TraceConfig&) = default;
TraceConfig& TraceConfig::operator=(const TraceConfig&) = default;
TraceConfig::TraceConfig(TraceConfig&&) noexcept = default;
TraceConfig& TraceConfig::operator=(TraceConfig&&) = default;

bool TraceConfig::operator==(const TraceConfig& other) const {
  return unknown_fields_ == other.unknown_fields_
   && buffers_ == other.buffers_
   && data_sources_ == other.data_sources_
   && builtin_data_sources_ == other.builtin_data_sources_
   && duration_ms_ == other.duration_ms_
   && enable_extra_guardrails_ == other.enable_extra_guardrails_
   && lockdown_mode_ == other.lockdown_mode_
   && producers_ == other.producers_
   && statsd_metadata_ == other.statsd_metadata_
   && write_into_file_ == other.write_into_file_
   && output_path_ == other.output_path_
   && file_write_period_ms_ == other.file_write_period_ms_
   && max_file_size_bytes_ == other.max_file_size_bytes_
   && guardrail_overrides_ == other.guardrail_overrides_
   && deferred_start_ == other.deferred_start_
   && flush_period_ms_ == other.flush_period_ms_
   && flush_timeout_ms_ == other.flush_timeout_ms_
   && data_source_stop_timeout_ms_ == other.data_source_stop_timeout_ms_
   && notify_traceur_ == other.notify_traceur_
   && trigger_config_ == other.trigger_config_
   && activate_triggers_ == other.activate_triggers_
   && incremental_state_config_ == other.incremental_state_config_
   && allow_user_build_tracing_ == other.allow_user_build_tracing_
   && unique_session_name_ == other.unique_session_name_
   && compression_type_ == other.compression_type_
   && incident_report_config_ == other.incident_report_config_
   && trace_uuid_msb_ == other.trace_uuid_msb_
   && trace_uuid_lsb_ == other.trace_uuid_lsb_;
}

bool TraceConfig::ParseFromArray(const void* raw, size_t size) {
  buffers_.clear();
  data_sources_.clear();
  producers_.clear();
  activate_triggers_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* buffers */:
        buffers_.emplace_back();
        buffers_.back().ParseFromString(field.as_std_string());
        break;
      case 2 /* data_sources */:
        data_sources_.emplace_back();
        data_sources_.back().ParseFromString(field.as_std_string());
        break;
      case 20 /* builtin_data_sources */:
        (*builtin_data_sources_).ParseFromString(field.as_std_string());
        break;
      case 3 /* duration_ms */:
        field.get(&duration_ms_);
        break;
      case 4 /* enable_extra_guardrails */:
        field.get(&enable_extra_guardrails_);
        break;
      case 5 /* lockdown_mode */:
        field.get(&lockdown_mode_);
        break;
      case 6 /* producers */:
        producers_.emplace_back();
        producers_.back().ParseFromString(field.as_std_string());
        break;
      case 7 /* statsd_metadata */:
        (*statsd_metadata_).ParseFromString(field.as_std_string());
        break;
      case 8 /* write_into_file */:
        field.get(&write_into_file_);
        break;
      case 29 /* output_path */:
        field.get(&output_path_);
        break;
      case 9 /* file_write_period_ms */:
        field.get(&file_write_period_ms_);
        break;
      case 10 /* max_file_size_bytes */:
        field.get(&max_file_size_bytes_);
        break;
      case 11 /* guardrail_overrides */:
        (*guardrail_overrides_).ParseFromString(field.as_std_string());
        break;
      case 12 /* deferred_start */:
        field.get(&deferred_start_);
        break;
      case 13 /* flush_period_ms */:
        field.get(&flush_period_ms_);
        break;
      case 14 /* flush_timeout_ms */:
        field.get(&flush_timeout_ms_);
        break;
      case 23 /* data_source_stop_timeout_ms */:
        field.get(&data_source_stop_timeout_ms_);
        break;
      case 16 /* notify_traceur */:
        field.get(&notify_traceur_);
        break;
      case 17 /* trigger_config */:
        (*trigger_config_).ParseFromString(field.as_std_string());
        break;
      case 18 /* activate_triggers */:
        activate_triggers_.emplace_back();
        field.get(&activate_triggers_.back());
        break;
      case 21 /* incremental_state_config */:
        (*incremental_state_config_).ParseFromString(field.as_std_string());
        break;
      case 19 /* allow_user_build_tracing */:
        field.get(&allow_user_build_tracing_);
        break;
      case 22 /* unique_session_name */:
        field.get(&unique_session_name_);
        break;
      case 24 /* compression_type */:
        field.get(&compression_type_);
        break;
      case 25 /* incident_report_config */:
        (*incident_report_config_).ParseFromString(field.as_std_string());
        break;
      case 27 /* trace_uuid_msb */:
        field.get(&trace_uuid_msb_);
        break;
      case 28 /* trace_uuid_lsb */:
        field.get(&trace_uuid_lsb_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string TraceConfig::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> TraceConfig::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void TraceConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: buffers
  for (auto& it : buffers_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 2: data_sources
  for (auto& it : data_sources_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  // Field 20: builtin_data_sources
  if (_has_field_[20]) {
    (*builtin_data_sources_).Serialize(msg->BeginNestedMessage<::protozero::Message>(20));
  }

  // Field 3: duration_ms
  if (_has_field_[3]) {
    msg->AppendVarInt(3, duration_ms_);
  }

  // Field 4: enable_extra_guardrails
  if (_has_field_[4]) {
    msg->AppendTinyVarInt(4, enable_extra_guardrails_);
  }

  // Field 5: lockdown_mode
  if (_has_field_[5]) {
    msg->AppendVarInt(5, lockdown_mode_);
  }

  // Field 6: producers
  for (auto& it : producers_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
  }

  // Field 7: statsd_metadata
  if (_has_field_[7]) {
    (*statsd_metadata_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
  }

  // Field 8: write_into_file
  if (_has_field_[8]) {
    msg->AppendTinyVarInt(8, write_into_file_);
  }

  // Field 29: output_path
  if (_has_field_[29]) {
    msg->AppendString(29, output_path_);
  }

  // Field 9: file_write_period_ms
  if (_has_field_[9]) {
    msg->AppendVarInt(9, file_write_period_ms_);
  }

  // Field 10: max_file_size_bytes
  if (_has_field_[10]) {
    msg->AppendVarInt(10, max_file_size_bytes_);
  }

  // Field 11: guardrail_overrides
  if (_has_field_[11]) {
    (*guardrail_overrides_).Serialize(msg->BeginNestedMessage<::protozero::Message>(11));
  }

  // Field 12: deferred_start
  if (_has_field_[12]) {
    msg->AppendTinyVarInt(12, deferred_start_);
  }

  // Field 13: flush_period_ms
  if (_has_field_[13]) {
    msg->AppendVarInt(13, flush_period_ms_);
  }

  // Field 14: flush_timeout_ms
  if (_has_field_[14]) {
    msg->AppendVarInt(14, flush_timeout_ms_);
  }

  // Field 23: data_source_stop_timeout_ms
  if (_has_field_[23]) {
    msg->AppendVarInt(23, data_source_stop_timeout_ms_);
  }

  // Field 16: notify_traceur
  if (_has_field_[16]) {
    msg->AppendTinyVarInt(16, notify_traceur_);
  }

  // Field 17: trigger_config
  if (_has_field_[17]) {
    (*trigger_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(17));
  }

  // Field 18: activate_triggers
  for (auto& it : activate_triggers_) {
    msg->AppendString(18, it);
  }

  // Field 21: incremental_state_config
  if (_has_field_[21]) {
    (*incremental_state_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(21));
  }

  // Field 19: allow_user_build_tracing
  if (_has_field_[19]) {
    msg->AppendTinyVarInt(19, allow_user_build_tracing_);
  }

  // Field 22: unique_session_name
  if (_has_field_[22]) {
    msg->AppendString(22, unique_session_name_);
  }

  // Field 24: compression_type
  if (_has_field_[24]) {
    msg->AppendVarInt(24, compression_type_);
  }

  // Field 25: incident_report_config
  if (_has_field_[25]) {
    (*incident_report_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(25));
  }

  // Field 27: trace_uuid_msb
  if (_has_field_[27]) {
    msg->AppendVarInt(27, trace_uuid_msb_);
  }

  // Field 28: trace_uuid_lsb
  if (_has_field_[28]) {
    msg->AppendVarInt(28, trace_uuid_lsb_);
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}

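// Usage sketch (illustrative, not produced by the code generator): the
// TraceConfig implementation above is normally driven through the accessors
// declared in trace_config.gen.h. Assuming the usual cppgen accessors
// (set_duration_ms(), add_buffers(), set_size_kb(), add_data_sources(),
// mutable_config(), set_name()), a config can be built and serialized as
// below; the helper name and field values are arbitrary examples.
inline std::string BuildExampleTraceConfigBlob() {
  TraceConfig cfg;
  cfg.set_duration_ms(10000);            // trace for 10 seconds
  cfg.add_buffers()->set_size_kb(1024);  // one 1 MiB central buffer
  auto* ds_cfg = cfg.add_data_sources()->mutable_config();
  ds_cfg->set_name("track_event");       // enable the track_event data source
  // Serialize() above appends every set field, plus any preserved unknown
  // fields, into a protozero heap-buffered message.
  return cfg.SerializeAsString();
}
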
TraceConfig_IncidentReportConfig::TraceConfig_IncidentReportConfig() = default;
|
|
TraceConfig_IncidentReportConfig::~TraceConfig_IncidentReportConfig() = default;
|
|
TraceConfig_IncidentReportConfig::TraceConfig_IncidentReportConfig(const TraceConfig_IncidentReportConfig&) = default;
|
|
TraceConfig_IncidentReportConfig& TraceConfig_IncidentReportConfig::operator=(const TraceConfig_IncidentReportConfig&) = default;
|
|
TraceConfig_IncidentReportConfig::TraceConfig_IncidentReportConfig(TraceConfig_IncidentReportConfig&&) noexcept = default;
|
|
TraceConfig_IncidentReportConfig& TraceConfig_IncidentReportConfig::operator=(TraceConfig_IncidentReportConfig&&) = default;
|
|
|
|
bool TraceConfig_IncidentReportConfig::operator==(const TraceConfig_IncidentReportConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& destination_package_ == other.destination_package_
|
|
&& destination_class_ == other.destination_class_
|
|
&& privacy_level_ == other.privacy_level_
|
|
&& skip_dropbox_ == other.skip_dropbox_;
|
|
}
|
|
|
|
bool TraceConfig_IncidentReportConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* destination_package */:
|
|
field.get(&destination_package_);
|
|
break;
|
|
case 2 /* destination_class */:
|
|
field.get(&destination_class_);
|
|
break;
|
|
case 3 /* privacy_level */:
|
|
field.get(&privacy_level_);
|
|
break;
|
|
case 4 /* skip_dropbox */:
|
|
field.get(&skip_dropbox_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_IncidentReportConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_IncidentReportConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_IncidentReportConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: destination_package
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, destination_package_);
|
|
}
|
|
|
|
// Field 2: destination_class
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, destination_class_);
|
|
}
|
|
|
|
// Field 3: privacy_level
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, privacy_level_);
|
|
}
|
|
|
|
// Field 4: skip_dropbox
|
|
if (_has_field_[4]) {
|
|
msg->AppendTinyVarInt(4, skip_dropbox_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TraceConfig_IncrementalStateConfig::TraceConfig_IncrementalStateConfig() = default;
|
|
TraceConfig_IncrementalStateConfig::~TraceConfig_IncrementalStateConfig() = default;
|
|
TraceConfig_IncrementalStateConfig::TraceConfig_IncrementalStateConfig(const TraceConfig_IncrementalStateConfig&) = default;
|
|
TraceConfig_IncrementalStateConfig& TraceConfig_IncrementalStateConfig::operator=(const TraceConfig_IncrementalStateConfig&) = default;
|
|
TraceConfig_IncrementalStateConfig::TraceConfig_IncrementalStateConfig(TraceConfig_IncrementalStateConfig&&) noexcept = default;
|
|
TraceConfig_IncrementalStateConfig& TraceConfig_IncrementalStateConfig::operator=(TraceConfig_IncrementalStateConfig&&) = default;
|
|
|
|
bool TraceConfig_IncrementalStateConfig::operator==(const TraceConfig_IncrementalStateConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& clear_period_ms_ == other.clear_period_ms_;
|
|
}
|
|
|
|
bool TraceConfig_IncrementalStateConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* clear_period_ms */:
|
|
field.get(&clear_period_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_IncrementalStateConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_IncrementalStateConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_IncrementalStateConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: clear_period_ms
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, clear_period_ms_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TraceConfig_TriggerConfig::TraceConfig_TriggerConfig() = default;
|
|
TraceConfig_TriggerConfig::~TraceConfig_TriggerConfig() = default;
|
|
TraceConfig_TriggerConfig::TraceConfig_TriggerConfig(const TraceConfig_TriggerConfig&) = default;
|
|
TraceConfig_TriggerConfig& TraceConfig_TriggerConfig::operator=(const TraceConfig_TriggerConfig&) = default;
|
|
TraceConfig_TriggerConfig::TraceConfig_TriggerConfig(TraceConfig_TriggerConfig&&) noexcept = default;
|
|
TraceConfig_TriggerConfig& TraceConfig_TriggerConfig::operator=(TraceConfig_TriggerConfig&&) = default;
|
|
|
|
bool TraceConfig_TriggerConfig::operator==(const TraceConfig_TriggerConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& trigger_mode_ == other.trigger_mode_
|
|
&& triggers_ == other.triggers_
|
|
&& trigger_timeout_ms_ == other.trigger_timeout_ms_;
|
|
}
|
|
|
|
bool TraceConfig_TriggerConfig::ParseFromArray(const void* raw, size_t size) {
|
|
triggers_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trigger_mode */:
|
|
field.get(&trigger_mode_);
|
|
break;
|
|
case 2 /* triggers */:
|
|
triggers_.emplace_back();
|
|
triggers_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 3 /* trigger_timeout_ms */:
|
|
field.get(&trigger_timeout_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_TriggerConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_TriggerConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_TriggerConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trigger_mode
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, trigger_mode_);
|
|
}
|
|
|
|
// Field 2: triggers
|
|
for (auto& it : triggers_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
// Field 3: trigger_timeout_ms
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, trigger_timeout_ms_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TraceConfig_TriggerConfig_Trigger::TraceConfig_TriggerConfig_Trigger() = default;
|
|
TraceConfig_TriggerConfig_Trigger::~TraceConfig_TriggerConfig_Trigger() = default;
|
|
TraceConfig_TriggerConfig_Trigger::TraceConfig_TriggerConfig_Trigger(const TraceConfig_TriggerConfig_Trigger&) = default;
|
|
TraceConfig_TriggerConfig_Trigger& TraceConfig_TriggerConfig_Trigger::operator=(const TraceConfig_TriggerConfig_Trigger&) = default;
|
|
TraceConfig_TriggerConfig_Trigger::TraceConfig_TriggerConfig_Trigger(TraceConfig_TriggerConfig_Trigger&&) noexcept = default;
|
|
TraceConfig_TriggerConfig_Trigger& TraceConfig_TriggerConfig_Trigger::operator=(TraceConfig_TriggerConfig_Trigger&&) = default;
|
|
|
|
bool TraceConfig_TriggerConfig_Trigger::operator==(const TraceConfig_TriggerConfig_Trigger& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_ == other.name_
|
|
&& producer_name_regex_ == other.producer_name_regex_
|
|
&& stop_delay_ms_ == other.stop_delay_ms_;
|
|
}
|
|
|
|
bool TraceConfig_TriggerConfig_Trigger::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 2 /* producer_name_regex */:
|
|
field.get(&producer_name_regex_);
|
|
break;
|
|
case 3 /* stop_delay_ms */:
|
|
field.get(&stop_delay_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_TriggerConfig_Trigger::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_TriggerConfig_Trigger::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_TriggerConfig_Trigger::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, name_);
|
|
}
|
|
|
|
// Field 2: producer_name_regex
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, producer_name_regex_);
|
|
}
|
|
|
|
// Field 3: stop_delay_ms
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, stop_delay_ms_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TraceConfig_GuardrailOverrides::TraceConfig_GuardrailOverrides() = default;
|
|
TraceConfig_GuardrailOverrides::~TraceConfig_GuardrailOverrides() = default;
|
|
TraceConfig_GuardrailOverrides::TraceConfig_GuardrailOverrides(const TraceConfig_GuardrailOverrides&) = default;
|
|
TraceConfig_GuardrailOverrides& TraceConfig_GuardrailOverrides::operator=(const TraceConfig_GuardrailOverrides&) = default;
|
|
TraceConfig_GuardrailOverrides::TraceConfig_GuardrailOverrides(TraceConfig_GuardrailOverrides&&) noexcept = default;
|
|
TraceConfig_GuardrailOverrides& TraceConfig_GuardrailOverrides::operator=(TraceConfig_GuardrailOverrides&&) = default;
|
|
|
|
bool TraceConfig_GuardrailOverrides::operator==(const TraceConfig_GuardrailOverrides& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& max_upload_per_day_bytes_ == other.max_upload_per_day_bytes_;
|
|
}
|
|
|
|
bool TraceConfig_GuardrailOverrides::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* max_upload_per_day_bytes */:
|
|
field.get(&max_upload_per_day_bytes_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_GuardrailOverrides::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_GuardrailOverrides::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_GuardrailOverrides::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: max_upload_per_day_bytes
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, max_upload_per_day_bytes_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TraceConfig_StatsdMetadata::TraceConfig_StatsdMetadata() = default;
|
|
TraceConfig_StatsdMetadata::~TraceConfig_StatsdMetadata() = default;
|
|
TraceConfig_StatsdMetadata::TraceConfig_StatsdMetadata(const TraceConfig_StatsdMetadata&) = default;
|
|
TraceConfig_StatsdMetadata& TraceConfig_StatsdMetadata::operator=(const TraceConfig_StatsdMetadata&) = default;
|
|
TraceConfig_StatsdMetadata::TraceConfig_StatsdMetadata(TraceConfig_StatsdMetadata&&) noexcept = default;
|
|
TraceConfig_StatsdMetadata& TraceConfig_StatsdMetadata::operator=(TraceConfig_StatsdMetadata&&) = default;
|
|
|
|
bool TraceConfig_StatsdMetadata::operator==(const TraceConfig_StatsdMetadata& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& triggering_alert_id_ == other.triggering_alert_id_
|
|
&& triggering_config_uid_ == other.triggering_config_uid_
|
|
&& triggering_config_id_ == other.triggering_config_id_
|
|
&& triggering_subscription_id_ == other.triggering_subscription_id_;
|
|
}
|
|
|
|
bool TraceConfig_StatsdMetadata::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* triggering_alert_id */:
|
|
field.get(&triggering_alert_id_);
|
|
break;
|
|
case 2 /* triggering_config_uid */:
|
|
field.get(&triggering_config_uid_);
|
|
break;
|
|
case 3 /* triggering_config_id */:
|
|
field.get(&triggering_config_id_);
|
|
break;
|
|
case 4 /* triggering_subscription_id */:
|
|
field.get(&triggering_subscription_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_StatsdMetadata::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_StatsdMetadata::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_StatsdMetadata::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: triggering_alert_id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, triggering_alert_id_);
|
|
}
|
|
|
|
// Field 2: triggering_config_uid
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, triggering_config_uid_);
|
|
}
|
|
|
|
// Field 3: triggering_config_id
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, triggering_config_id_);
|
|
}
|
|
|
|
// Field 4: triggering_subscription_id
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, triggering_subscription_id_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TraceConfig_ProducerConfig::TraceConfig_ProducerConfig() = default;
|
|
TraceConfig_ProducerConfig::~TraceConfig_ProducerConfig() = default;
|
|
TraceConfig_ProducerConfig::TraceConfig_ProducerConfig(const TraceConfig_ProducerConfig&) = default;
|
|
TraceConfig_ProducerConfig& TraceConfig_ProducerConfig::operator=(const TraceConfig_ProducerConfig&) = default;
|
|
TraceConfig_ProducerConfig::TraceConfig_ProducerConfig(TraceConfig_ProducerConfig&&) noexcept = default;
|
|
TraceConfig_ProducerConfig& TraceConfig_ProducerConfig::operator=(TraceConfig_ProducerConfig&&) = default;
|
|
|
|
bool TraceConfig_ProducerConfig::operator==(const TraceConfig_ProducerConfig& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& producer_name_ == other.producer_name_
|
|
&& shm_size_kb_ == other.shm_size_kb_
|
|
&& page_size_kb_ == other.page_size_kb_;
|
|
}
|
|
|
|
bool TraceConfig_ProducerConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* producer_name */:
|
|
field.get(&producer_name_);
|
|
break;
|
|
case 2 /* shm_size_kb */:
|
|
field.get(&shm_size_kb_);
|
|
break;
|
|
case 3 /* page_size_kb */:
|
|
field.get(&page_size_kb_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_ProducerConfig::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_ProducerConfig::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_ProducerConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: producer_name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, producer_name_);
|
|
}
|
|
|
|
// Field 2: shm_size_kb
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, shm_size_kb_);
|
|
}
|
|
|
|
// Field 3: page_size_kb
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, page_size_kb_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TraceConfig_BuiltinDataSource::TraceConfig_BuiltinDataSource() = default;
|
|
TraceConfig_BuiltinDataSource::~TraceConfig_BuiltinDataSource() = default;
|
|
TraceConfig_BuiltinDataSource::TraceConfig_BuiltinDataSource(const TraceConfig_BuiltinDataSource&) = default;
|
|
TraceConfig_BuiltinDataSource& TraceConfig_BuiltinDataSource::operator=(const TraceConfig_BuiltinDataSource&) = default;
|
|
TraceConfig_BuiltinDataSource::TraceConfig_BuiltinDataSource(TraceConfig_BuiltinDataSource&&) noexcept = default;
|
|
TraceConfig_BuiltinDataSource& TraceConfig_BuiltinDataSource::operator=(TraceConfig_BuiltinDataSource&&) = default;
|
|
|
|
bool TraceConfig_BuiltinDataSource::operator==(const TraceConfig_BuiltinDataSource& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& disable_clock_snapshotting_ == other.disable_clock_snapshotting_
|
|
&& disable_trace_config_ == other.disable_trace_config_
|
|
&& disable_system_info_ == other.disable_system_info_
|
|
&& disable_service_events_ == other.disable_service_events_
|
|
&& primary_trace_clock_ == other.primary_trace_clock_
|
|
&& snapshot_interval_ms_ == other.snapshot_interval_ms_;
|
|
}
|
|
|
|
bool TraceConfig_BuiltinDataSource::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* disable_clock_snapshotting */:
|
|
field.get(&disable_clock_snapshotting_);
|
|
break;
|
|
case 2 /* disable_trace_config */:
|
|
field.get(&disable_trace_config_);
|
|
break;
|
|
case 3 /* disable_system_info */:
|
|
field.get(&disable_system_info_);
|
|
break;
|
|
case 4 /* disable_service_events */:
|
|
field.get(&disable_service_events_);
|
|
break;
|
|
case 5 /* primary_trace_clock */:
|
|
field.get(&primary_trace_clock_);
|
|
break;
|
|
case 6 /* snapshot_interval_ms */:
|
|
field.get(&snapshot_interval_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_BuiltinDataSource::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_BuiltinDataSource::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_BuiltinDataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: disable_clock_snapshotting
|
|
if (_has_field_[1]) {
|
|
msg->AppendTinyVarInt(1, disable_clock_snapshotting_);
|
|
}
|
|
|
|
// Field 2: disable_trace_config
|
|
if (_has_field_[2]) {
|
|
msg->AppendTinyVarInt(2, disable_trace_config_);
|
|
}
|
|
|
|
// Field 3: disable_system_info
|
|
if (_has_field_[3]) {
|
|
msg->AppendTinyVarInt(3, disable_system_info_);
|
|
}
|
|
|
|
// Field 4: disable_service_events
|
|
if (_has_field_[4]) {
|
|
msg->AppendTinyVarInt(4, disable_service_events_);
|
|
}
|
|
|
|
// Field 5: primary_trace_clock
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, primary_trace_clock_);
|
|
}
|
|
|
|
// Field 6: snapshot_interval_ms
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, snapshot_interval_ms_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TraceConfig_DataSource::TraceConfig_DataSource() = default;
|
|
TraceConfig_DataSource::~TraceConfig_DataSource() = default;
|
|
TraceConfig_DataSource::TraceConfig_DataSource(const TraceConfig_DataSource&) = default;
|
|
TraceConfig_DataSource& TraceConfig_DataSource::operator=(const TraceConfig_DataSource&) = default;
|
|
TraceConfig_DataSource::TraceConfig_DataSource(TraceConfig_DataSource&&) noexcept = default;
|
|
TraceConfig_DataSource& TraceConfig_DataSource::operator=(TraceConfig_DataSource&&) = default;
|
|
|
|
bool TraceConfig_DataSource::operator==(const TraceConfig_DataSource& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& config_ == other.config_
|
|
&& producer_name_filter_ == other.producer_name_filter_
|
|
&& producer_name_regex_filter_ == other.producer_name_regex_filter_;
|
|
}
|
|
|
|
bool TraceConfig_DataSource::ParseFromArray(const void* raw, size_t size) {
|
|
producer_name_filter_.clear();
|
|
producer_name_regex_filter_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* config */:
|
|
(*config_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 2 /* producer_name_filter */:
|
|
producer_name_filter_.emplace_back();
|
|
field.get(&producer_name_filter_.back());
|
|
break;
|
|
case 3 /* producer_name_regex_filter */:
|
|
producer_name_regex_filter_.emplace_back();
|
|
field.get(&producer_name_regex_filter_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_DataSource::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_DataSource::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_DataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: config
|
|
if (_has_field_[1]) {
|
|
(*config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: producer_name_filter
|
|
for (auto& it : producer_name_filter_) {
|
|
msg->AppendString(2, it);
|
|
}
|
|
|
|
// Field 3: producer_name_regex_filter
|
|
for (auto& it : producer_name_regex_filter_) {
|
|
msg->AppendString(3, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TraceConfig_BufferConfig::TraceConfig_BufferConfig() = default;
TraceConfig_BufferConfig::~TraceConfig_BufferConfig() = default;
TraceConfig_BufferConfig::TraceConfig_BufferConfig(const TraceConfig_BufferConfig&) = default;
TraceConfig_BufferConfig& TraceConfig_BufferConfig::operator=(const TraceConfig_BufferConfig&) = default;
TraceConfig_BufferConfig::TraceConfig_BufferConfig(TraceConfig_BufferConfig&&) noexcept = default;
TraceConfig_BufferConfig& TraceConfig_BufferConfig::operator=(TraceConfig_BufferConfig&&) = default;

bool TraceConfig_BufferConfig::operator==(const TraceConfig_BufferConfig& other) const {
  return unknown_fields_ == other.unknown_fields_
   && size_kb_ == other.size_kb_
   && fill_policy_ == other.fill_policy_;
}

bool TraceConfig_BufferConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* size_kb */:
        field.get(&size_kb_);
        break;
      case 4 /* fill_policy */:
        field.get(&fill_policy_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string TraceConfig_BufferConfig::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> TraceConfig_BufferConfig::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void TraceConfig_BufferConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: size_kb
  if (_has_field_[1]) {
    msg->AppendVarInt(1, size_kb_);
  }

  // Field 4: fill_policy
  if (_has_field_[4]) {
    msg->AppendVarInt(4, fill_policy_);
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}

}  // namespace perfetto
}  // namespace protos
}  // namespace gen
#pragma GCC diagnostic pop
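// Usage sketch (illustrative, not produced by the code generator): the
// decoding direction of the messages defined above. ParseFromArray() clears
// the repeated fields, walks the buffer with protozero::ProtoDecoder, and
// keeps unrecognized fields in unknown_fields_, so bytes produced by
// SerializeAsArray() can be decoded back without losing unknown fields.
// The helper name is an arbitrary example.
inline bool DecodeTraceConfigBlob(const std::vector<uint8_t>& blob,
                                  perfetto::protos::gen::TraceConfig* out) {
  // Returns false if the decoder hit an error or did not consume the buffer.
  return out->ParseFromArray(blob.data(), blob.size());
}
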
// gen_amalgamated begin source: gen/protos/perfetto/common/android_log_constants.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/builtin_clock.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/commit_data_request.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/data_source_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/gpu_counter_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/observable_events.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/sys_stats_counters.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/trace_stats.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_capabilities.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_state.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/track_event_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/gpu_counter_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/gpu_log.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/gpu_render_stage_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/vulkan_api_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/vulkan_memory_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/deobfuscation.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/heap_graph.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/profile_common.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/profile_packet.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/smaps.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_frame_reporter.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_histogram_sample.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_keyed_service.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_latency_info.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_legacy_ipc.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_thread_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_user_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/counter_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/debug_annotation.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/log_message.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/process_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/source_location.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/task_execution.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/thread_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/interned_data/interned_data.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_COMPOSITOR_SCHEDULER_STATE_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_COMPOSITOR_SCHEDULER_STATE_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class CompositorTimingHistory;
|
|
class BeginFrameSourceState;
|
|
class BeginFrameArgs;
|
|
class SourceLocation;
|
|
class BeginFrameObserverState;
|
|
class BeginImplFrameArgs;
|
|
class BeginImplFrameArgs_TimestampsInUs;
|
|
class ChromeCompositorStateMachine;
|
|
class ChromeCompositorStateMachine_MinorState;
|
|
class ChromeCompositorStateMachine_MajorState;
|
|
class ChromeCompositorSchedulerState;
|
|
enum ChromeCompositorSchedulerAction : int;
|
|
enum BeginFrameArgs_BeginFrameArgsType : int;
|
|
enum BeginImplFrameArgs_State : int;
|
|
enum ChromeCompositorStateMachine_MinorState_TreePriority : int;
|
|
enum ChromeCompositorStateMachine_MinorState_ScrollHandlerState : int;
|
|
enum ChromeCompositorStateMachine_MajorState_BeginImplFrameState : int;
|
|
enum ChromeCompositorStateMachine_MajorState_BeginMainFrameState : int;
|
|
enum ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState : int;
|
|
enum ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState : int;
|
|
enum ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode : int;
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum ChromeCompositorSchedulerAction : int {
|
|
CC_SCHEDULER_ACTION_UNSPECIFIED = 0,
|
|
CC_SCHEDULER_ACTION_NONE = 1,
|
|
CC_SCHEDULER_ACTION_SEND_BEGIN_MAIN_FRAME = 2,
|
|
CC_SCHEDULER_ACTION_COMMIT = 3,
|
|
CC_SCHEDULER_ACTION_ACTIVATE_SYNC_TREE = 4,
|
|
CC_SCHEDULER_ACTION_DRAW_IF_POSSIBLE = 5,
|
|
CC_SCHEDULER_ACTION_DRAW_FORCED = 6,
|
|
CC_SCHEDULER_ACTION_DRAW_ABORT = 7,
|
|
CC_SCHEDULER_ACTION_BEGIN_LAYER_TREE_FRAME_SINK_CREATION = 8,
|
|
CC_SCHEDULER_ACTION_PREPARE_TILES = 9,
|
|
CC_SCHEDULER_ACTION_INVALIDATE_LAYER_TREE_FRAME_SINK = 10,
|
|
CC_SCHEDULER_ACTION_PERFORM_IMPL_SIDE_INVALIDATION = 11,
|
|
CC_SCHEDULER_ACTION_NOTIFY_BEGIN_MAIN_FRAME_NOT_EXPECTED_UNTIL = 12,
|
|
CC_SCHEDULER_ACTION_NOTIFY_BEGIN_MAIN_FRAME_NOT_EXPECTED_SOON = 13,
|
|
};
|
|
enum BeginFrameArgs_BeginFrameArgsType : int {
|
|
BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED = 0,
|
|
BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_INVALID = 1,
|
|
BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_NORMAL = 2,
|
|
BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED = 3,
|
|
};
|
|
enum BeginImplFrameArgs_State : int {
|
|
BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED = 0,
|
|
BeginImplFrameArgs_State_BEGIN_FRAME_USING = 1,
|
|
};
|
|
enum ChromeCompositorStateMachine_MinorState_TreePriority : int {
|
|
ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED = 0,
|
|
ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES = 1,
|
|
ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY = 2,
|
|
ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY = 3,
|
|
};
|
|
enum ChromeCompositorStateMachine_MinorState_ScrollHandlerState : int {
|
|
ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED = 0,
|
|
ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_AFFECTS_SCROLL_HANDLER = 1,
|
|
ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER = 2,
|
|
};
|
|
enum ChromeCompositorStateMachine_MajorState_BeginImplFrameState : int {
|
|
ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED = 0,
|
|
ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_IDLE = 1,
|
|
ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME = 2,
|
|
ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE = 3,
|
|
};
|
|
enum ChromeCompositorStateMachine_MajorState_BeginMainFrameState : int {
|
|
ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED = 0,
|
|
ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_IDLE = 1,
|
|
ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_SENT = 2,
|
|
ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT = 3,
|
|
};
|
|
enum ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState : int {
|
|
ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED = 0,
|
|
ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_NONE = 1,
|
|
ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_ACTIVE = 2,
|
|
ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_CREATING = 3,
|
|
ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT = 4,
|
|
ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION = 5,
|
|
};
|
|
enum ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState : int {
|
|
ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED = 0,
|
|
ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_IDLE = 1,
|
|
ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_COMMIT = 2,
|
|
ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_ACTIVATION = 3,
|
|
ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW = 4,
|
|
};
|
|
enum ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode : int {
|
|
ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED = 0,
|
|
ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_NONE = 1,
|
|
ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_IMMEDIATE = 2,
|
|
ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_REGULAR = 3,
|
|
ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_LATE = 4,
|
|
ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED = 5,
|
|
};
|
|
|
|
class PERFETTO_EXPORT CompositorTimingHistory : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kBeginMainFrameQueueCriticalEstimateDeltaUsFieldNumber = 1,
|
|
kBeginMainFrameQueueNotCriticalEstimateDeltaUsFieldNumber = 2,
|
|
kBeginMainFrameStartToReadyToCommitEstimateDeltaUsFieldNumber = 3,
|
|
kCommitToReadyToActivateEstimateDeltaUsFieldNumber = 4,
|
|
kPrepareTilesEstimateDeltaUsFieldNumber = 5,
|
|
kActivateEstimateDeltaUsFieldNumber = 6,
|
|
kDrawEstimateDeltaUsFieldNumber = 7,
|
|
};
|
|
|
|
CompositorTimingHistory();
|
|
~CompositorTimingHistory() override;
|
|
CompositorTimingHistory(CompositorTimingHistory&&) noexcept;
|
|
CompositorTimingHistory& operator=(CompositorTimingHistory&&);
|
|
CompositorTimingHistory(const CompositorTimingHistory&);
|
|
CompositorTimingHistory& operator=(const CompositorTimingHistory&);
|
|
bool operator==(const CompositorTimingHistory&) const;
|
|
bool operator!=(const CompositorTimingHistory& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_begin_main_frame_queue_critical_estimate_delta_us() const { return _has_field_[1]; }
|
|
int64_t begin_main_frame_queue_critical_estimate_delta_us() const { return begin_main_frame_queue_critical_estimate_delta_us_; }
|
|
void set_begin_main_frame_queue_critical_estimate_delta_us(int64_t value) { begin_main_frame_queue_critical_estimate_delta_us_ = value; _has_field_.set(1); }
|
|
|
|
bool has_begin_main_frame_queue_not_critical_estimate_delta_us() const { return _has_field_[2]; }
|
|
int64_t begin_main_frame_queue_not_critical_estimate_delta_us() const { return begin_main_frame_queue_not_critical_estimate_delta_us_; }
|
|
void set_begin_main_frame_queue_not_critical_estimate_delta_us(int64_t value) { begin_main_frame_queue_not_critical_estimate_delta_us_ = value; _has_field_.set(2); }
|
|
|
|
bool has_begin_main_frame_start_to_ready_to_commit_estimate_delta_us() const { return _has_field_[3]; }
|
|
int64_t begin_main_frame_start_to_ready_to_commit_estimate_delta_us() const { return begin_main_frame_start_to_ready_to_commit_estimate_delta_us_; }
|
|
void set_begin_main_frame_start_to_ready_to_commit_estimate_delta_us(int64_t value) { begin_main_frame_start_to_ready_to_commit_estimate_delta_us_ = value; _has_field_.set(3); }
|
|
|
|
bool has_commit_to_ready_to_activate_estimate_delta_us() const { return _has_field_[4]; }
|
|
int64_t commit_to_ready_to_activate_estimate_delta_us() const { return commit_to_ready_to_activate_estimate_delta_us_; }
|
|
void set_commit_to_ready_to_activate_estimate_delta_us(int64_t value) { commit_to_ready_to_activate_estimate_delta_us_ = value; _has_field_.set(4); }
|
|
|
|
bool has_prepare_tiles_estimate_delta_us() const { return _has_field_[5]; }
|
|
int64_t prepare_tiles_estimate_delta_us() const { return prepare_tiles_estimate_delta_us_; }
|
|
void set_prepare_tiles_estimate_delta_us(int64_t value) { prepare_tiles_estimate_delta_us_ = value; _has_field_.set(5); }
|
|
|
|
bool has_activate_estimate_delta_us() const { return _has_field_[6]; }
|
|
int64_t activate_estimate_delta_us() const { return activate_estimate_delta_us_; }
|
|
void set_activate_estimate_delta_us(int64_t value) { activate_estimate_delta_us_ = value; _has_field_.set(6); }
|
|
|
|
bool has_draw_estimate_delta_us() const { return _has_field_[7]; }
|
|
int64_t draw_estimate_delta_us() const { return draw_estimate_delta_us_; }
|
|
void set_draw_estimate_delta_us(int64_t value) { draw_estimate_delta_us_ = value; _has_field_.set(7); }
|
|
|
|
private:
|
|
int64_t begin_main_frame_queue_critical_estimate_delta_us_{};
|
|
int64_t begin_main_frame_queue_not_critical_estimate_delta_us_{};
|
|
int64_t begin_main_frame_start_to_ready_to_commit_estimate_delta_us_{};
|
|
int64_t commit_to_ready_to_activate_estimate_delta_us_{};
|
|
int64_t prepare_tiles_estimate_delta_us_{};
|
|
int64_t activate_estimate_delta_us_{};
|
|
int64_t draw_estimate_delta_us_{};
|
|
|
|
  // Allows preserving unknown protobuf fields for compatibility
  // with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<8> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT BeginFrameSourceState : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kSourceIdFieldNumber = 1,
|
|
kPausedFieldNumber = 2,
|
|
kNumObserversFieldNumber = 3,
|
|
kLastBeginFrameArgsFieldNumber = 4,
|
|
};
|
|
|
|
BeginFrameSourceState();
|
|
~BeginFrameSourceState() override;
|
|
BeginFrameSourceState(BeginFrameSourceState&&) noexcept;
|
|
BeginFrameSourceState& operator=(BeginFrameSourceState&&);
|
|
BeginFrameSourceState(const BeginFrameSourceState&);
|
|
BeginFrameSourceState& operator=(const BeginFrameSourceState&);
|
|
bool operator==(const BeginFrameSourceState&) const;
|
|
bool operator!=(const BeginFrameSourceState& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_source_id() const { return _has_field_[1]; }
|
|
uint32_t source_id() const { return source_id_; }
|
|
void set_source_id(uint32_t value) { source_id_ = value; _has_field_.set(1); }
|
|
|
|
bool has_paused() const { return _has_field_[2]; }
|
|
bool paused() const { return paused_; }
|
|
void set_paused(bool value) { paused_ = value; _has_field_.set(2); }
|
|
|
|
bool has_num_observers() const { return _has_field_[3]; }
|
|
uint32_t num_observers() const { return num_observers_; }
|
|
void set_num_observers(uint32_t value) { num_observers_ = value; _has_field_.set(3); }
|
|
|
|
bool has_last_begin_frame_args() const { return _has_field_[4]; }
|
|
const BeginFrameArgs& last_begin_frame_args() const { return *last_begin_frame_args_; }
|
|
BeginFrameArgs* mutable_last_begin_frame_args() { _has_field_.set(4); return last_begin_frame_args_.get(); }
|
|
|
|
private:
|
|
uint32_t source_id_{};
|
|
bool paused_{};
|
|
uint32_t num_observers_{};
|
|
::protozero::CopyablePtr<BeginFrameArgs> last_begin_frame_args_;
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<5> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT BeginFrameArgs : public ::protozero::CppMessageObj {
|
|
public:
|
|
using BeginFrameArgsType = BeginFrameArgs_BeginFrameArgsType;
|
|
static constexpr auto BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED;
|
|
static constexpr auto BEGIN_FRAME_ARGS_TYPE_INVALID = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_INVALID;
|
|
static constexpr auto BEGIN_FRAME_ARGS_TYPE_NORMAL = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_NORMAL;
|
|
static constexpr auto BEGIN_FRAME_ARGS_TYPE_MISSED = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED;
|
|
static constexpr auto BeginFrameArgsType_MIN = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED;
|
|
static constexpr auto BeginFrameArgsType_MAX = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED;
|
|
enum FieldNumbers {
|
|
kTypeFieldNumber = 1,
|
|
kSourceIdFieldNumber = 2,
|
|
kSequenceNumberFieldNumber = 3,
|
|
kFrameTimeUsFieldNumber = 4,
|
|
kDeadlineUsFieldNumber = 5,
|
|
kIntervalDeltaUsFieldNumber = 6,
|
|
kOnCriticalPathFieldNumber = 7,
|
|
kAnimateOnlyFieldNumber = 8,
|
|
kSourceLocationIidFieldNumber = 9,
|
|
kSourceLocationFieldNumber = 10,
|
|
};
|
|
|
|
BeginFrameArgs();
|
|
~BeginFrameArgs() override;
|
|
BeginFrameArgs(BeginFrameArgs&&) noexcept;
|
|
BeginFrameArgs& operator=(BeginFrameArgs&&);
|
|
BeginFrameArgs(const BeginFrameArgs&);
|
|
BeginFrameArgs& operator=(const BeginFrameArgs&);
|
|
bool operator==(const BeginFrameArgs&) const;
|
|
bool operator!=(const BeginFrameArgs& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_type() const { return _has_field_[1]; }
|
|
BeginFrameArgs_BeginFrameArgsType type() const { return type_; }
|
|
void set_type(BeginFrameArgs_BeginFrameArgsType value) { type_ = value; _has_field_.set(1); }
|
|
|
|
bool has_source_id() const { return _has_field_[2]; }
|
|
uint64_t source_id() const { return source_id_; }
|
|
void set_source_id(uint64_t value) { source_id_ = value; _has_field_.set(2); }
|
|
|
|
bool has_sequence_number() const { return _has_field_[3]; }
|
|
uint64_t sequence_number() const { return sequence_number_; }
|
|
void set_sequence_number(uint64_t value) { sequence_number_ = value; _has_field_.set(3); }
|
|
|
|
bool has_frame_time_us() const { return _has_field_[4]; }
|
|
int64_t frame_time_us() const { return frame_time_us_; }
|
|
void set_frame_time_us(int64_t value) { frame_time_us_ = value; _has_field_.set(4); }
|
|
|
|
bool has_deadline_us() const { return _has_field_[5]; }
|
|
int64_t deadline_us() const { return deadline_us_; }
|
|
void set_deadline_us(int64_t value) { deadline_us_ = value; _has_field_.set(5); }
|
|
|
|
bool has_interval_delta_us() const { return _has_field_[6]; }
|
|
int64_t interval_delta_us() const { return interval_delta_us_; }
|
|
void set_interval_delta_us(int64_t value) { interval_delta_us_ = value; _has_field_.set(6); }
|
|
|
|
bool has_on_critical_path() const { return _has_field_[7]; }
|
|
bool on_critical_path() const { return on_critical_path_; }
|
|
void set_on_critical_path(bool value) { on_critical_path_ = value; _has_field_.set(7); }
|
|
|
|
bool has_animate_only() const { return _has_field_[8]; }
|
|
bool animate_only() const { return animate_only_; }
|
|
void set_animate_only(bool value) { animate_only_ = value; _has_field_.set(8); }
|
|
|
|
bool has_source_location_iid() const { return _has_field_[9]; }
|
|
uint64_t source_location_iid() const { return source_location_iid_; }
|
|
void set_source_location_iid(uint64_t value) { source_location_iid_ = value; _has_field_.set(9); }
|
|
|
|
bool has_source_location() const { return _has_field_[10]; }
|
|
const SourceLocation& source_location() const { return *source_location_; }
|
|
SourceLocation* mutable_source_location() { _has_field_.set(10); return source_location_.get(); }
|
|
|
|
private:
|
|
BeginFrameArgs_BeginFrameArgsType type_{};
|
|
uint64_t source_id_{};
|
|
uint64_t sequence_number_{};
|
|
int64_t frame_time_us_{};
|
|
int64_t deadline_us_{};
|
|
int64_t interval_delta_us_{};
|
|
bool on_critical_path_{};
|
|
bool animate_only_{};
|
|
uint64_t source_location_iid_{};
|
|
::protozero::CopyablePtr<SourceLocation> source_location_;
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<11> _has_field_{};
|
|
};
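// Illustrative usage sketch (editorial comment, not part of the generated
// output): the nested enum values are re-exported as class-scope constants,
// so callers can write BeginFrameArgs::BEGIN_FRAME_ARGS_TYPE_NORMAL instead
// of the mangled BeginFrameArgs_BeginFrameArgsType_* spelling. The field
// values below are arbitrary examples:
//
//   perfetto::protos::gen::BeginFrameArgs args;
//   args.set_type(
//       perfetto::protos::gen::BeginFrameArgs::BEGIN_FRAME_ARGS_TYPE_NORMAL);
//   args.set_sequence_number(12u);
//   args.set_on_critical_path(true);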
|
|
|
|
|
|
class PERFETTO_EXPORT BeginFrameObserverState : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kDroppedBeginFrameArgsFieldNumber = 1,
|
|
kLastBeginFrameArgsFieldNumber = 2,
|
|
};
|
|
|
|
BeginFrameObserverState();
|
|
~BeginFrameObserverState() override;
|
|
BeginFrameObserverState(BeginFrameObserverState&&) noexcept;
|
|
BeginFrameObserverState& operator=(BeginFrameObserverState&&);
|
|
BeginFrameObserverState(const BeginFrameObserverState&);
|
|
BeginFrameObserverState& operator=(const BeginFrameObserverState&);
|
|
bool operator==(const BeginFrameObserverState&) const;
|
|
bool operator!=(const BeginFrameObserverState& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_dropped_begin_frame_args() const { return _has_field_[1]; }
|
|
int64_t dropped_begin_frame_args() const { return dropped_begin_frame_args_; }
|
|
void set_dropped_begin_frame_args(int64_t value) { dropped_begin_frame_args_ = value; _has_field_.set(1); }
|
|
|
|
bool has_last_begin_frame_args() const { return _has_field_[2]; }
|
|
const BeginFrameArgs& last_begin_frame_args() const { return *last_begin_frame_args_; }
|
|
BeginFrameArgs* mutable_last_begin_frame_args() { _has_field_.set(2); return last_begin_frame_args_.get(); }
|
|
|
|
private:
|
|
int64_t dropped_begin_frame_args_{};
|
|
::protozero::CopyablePtr<BeginFrameArgs> last_begin_frame_args_;
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT BeginImplFrameArgs : public ::protozero::CppMessageObj {
|
|
public:
|
|
using TimestampsInUs = BeginImplFrameArgs_TimestampsInUs;
|
|
using State = BeginImplFrameArgs_State;
|
|
static constexpr auto BEGIN_FRAME_FINISHED = BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED;
|
|
static constexpr auto BEGIN_FRAME_USING = BeginImplFrameArgs_State_BEGIN_FRAME_USING;
|
|
static constexpr auto State_MIN = BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED;
|
|
static constexpr auto State_MAX = BeginImplFrameArgs_State_BEGIN_FRAME_USING;
|
|
enum FieldNumbers {
|
|
kUpdatedAtUsFieldNumber = 1,
|
|
kFinishedAtUsFieldNumber = 2,
|
|
kStateFieldNumber = 3,
|
|
kCurrentArgsFieldNumber = 4,
|
|
kLastArgsFieldNumber = 5,
|
|
kTimestampsInUsFieldNumber = 6,
|
|
};
|
|
|
|
BeginImplFrameArgs();
|
|
~BeginImplFrameArgs() override;
|
|
BeginImplFrameArgs(BeginImplFrameArgs&&) noexcept;
|
|
BeginImplFrameArgs& operator=(BeginImplFrameArgs&&);
|
|
BeginImplFrameArgs(const BeginImplFrameArgs&);
|
|
BeginImplFrameArgs& operator=(const BeginImplFrameArgs&);
|
|
bool operator==(const BeginImplFrameArgs&) const;
|
|
bool operator!=(const BeginImplFrameArgs& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_updated_at_us() const { return _has_field_[1]; }
|
|
int64_t updated_at_us() const { return updated_at_us_; }
|
|
void set_updated_at_us(int64_t value) { updated_at_us_ = value; _has_field_.set(1); }
|
|
|
|
bool has_finished_at_us() const { return _has_field_[2]; }
|
|
int64_t finished_at_us() const { return finished_at_us_; }
|
|
void set_finished_at_us(int64_t value) { finished_at_us_ = value; _has_field_.set(2); }
|
|
|
|
bool has_state() const { return _has_field_[3]; }
|
|
BeginImplFrameArgs_State state() const { return state_; }
|
|
void set_state(BeginImplFrameArgs_State value) { state_ = value; _has_field_.set(3); }
|
|
|
|
bool has_current_args() const { return _has_field_[4]; }
|
|
const BeginFrameArgs& current_args() const { return *current_args_; }
|
|
BeginFrameArgs* mutable_current_args() { _has_field_.set(4); return current_args_.get(); }
|
|
|
|
bool has_last_args() const { return _has_field_[5]; }
|
|
const BeginFrameArgs& last_args() const { return *last_args_; }
|
|
BeginFrameArgs* mutable_last_args() { _has_field_.set(5); return last_args_.get(); }
|
|
|
|
bool has_timestamps_in_us() const { return _has_field_[6]; }
|
|
const BeginImplFrameArgs_TimestampsInUs& timestamps_in_us() const { return *timestamps_in_us_; }
|
|
BeginImplFrameArgs_TimestampsInUs* mutable_timestamps_in_us() { _has_field_.set(6); return timestamps_in_us_.get(); }
|
|
|
|
private:
|
|
int64_t updated_at_us_{};
|
|
int64_t finished_at_us_{};
|
|
BeginImplFrameArgs_State state_{};
|
|
::protozero::CopyablePtr<BeginFrameArgs> current_args_;
|
|
::protozero::CopyablePtr<BeginFrameArgs> last_args_;
|
|
::protozero::CopyablePtr<BeginImplFrameArgs_TimestampsInUs> timestamps_in_us_;
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<7> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT BeginImplFrameArgs_TimestampsInUs : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kIntervalDeltaFieldNumber = 1,
|
|
kNowToDeadlineDeltaFieldNumber = 2,
|
|
kFrameTimeToNowDeltaFieldNumber = 3,
|
|
kFrameTimeToDeadlineDeltaFieldNumber = 4,
|
|
kNowFieldNumber = 5,
|
|
kFrameTimeFieldNumber = 6,
|
|
kDeadlineFieldNumber = 7,
|
|
};
|
|
|
|
BeginImplFrameArgs_TimestampsInUs();
|
|
~BeginImplFrameArgs_TimestampsInUs() override;
|
|
BeginImplFrameArgs_TimestampsInUs(BeginImplFrameArgs_TimestampsInUs&&) noexcept;
|
|
BeginImplFrameArgs_TimestampsInUs& operator=(BeginImplFrameArgs_TimestampsInUs&&);
|
|
BeginImplFrameArgs_TimestampsInUs(const BeginImplFrameArgs_TimestampsInUs&);
|
|
BeginImplFrameArgs_TimestampsInUs& operator=(const BeginImplFrameArgs_TimestampsInUs&);
|
|
bool operator==(const BeginImplFrameArgs_TimestampsInUs&) const;
|
|
bool operator!=(const BeginImplFrameArgs_TimestampsInUs& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_interval_delta() const { return _has_field_[1]; }
|
|
int64_t interval_delta() const { return interval_delta_; }
|
|
void set_interval_delta(int64_t value) { interval_delta_ = value; _has_field_.set(1); }
|
|
|
|
bool has_now_to_deadline_delta() const { return _has_field_[2]; }
|
|
int64_t now_to_deadline_delta() const { return now_to_deadline_delta_; }
|
|
void set_now_to_deadline_delta(int64_t value) { now_to_deadline_delta_ = value; _has_field_.set(2); }
|
|
|
|
bool has_frame_time_to_now_delta() const { return _has_field_[3]; }
|
|
int64_t frame_time_to_now_delta() const { return frame_time_to_now_delta_; }
|
|
void set_frame_time_to_now_delta(int64_t value) { frame_time_to_now_delta_ = value; _has_field_.set(3); }
|
|
|
|
bool has_frame_time_to_deadline_delta() const { return _has_field_[4]; }
|
|
int64_t frame_time_to_deadline_delta() const { return frame_time_to_deadline_delta_; }
|
|
void set_frame_time_to_deadline_delta(int64_t value) { frame_time_to_deadline_delta_ = value; _has_field_.set(4); }
|
|
|
|
bool has_now() const { return _has_field_[5]; }
|
|
int64_t now() const { return now_; }
|
|
void set_now(int64_t value) { now_ = value; _has_field_.set(5); }
|
|
|
|
bool has_frame_time() const { return _has_field_[6]; }
|
|
int64_t frame_time() const { return frame_time_; }
|
|
void set_frame_time(int64_t value) { frame_time_ = value; _has_field_.set(6); }
|
|
|
|
bool has_deadline() const { return _has_field_[7]; }
|
|
int64_t deadline() const { return deadline_; }
|
|
void set_deadline(int64_t value) { deadline_ = value; _has_field_.set(7); }
|
|
|
|
private:
|
|
int64_t interval_delta_{};
|
|
int64_t now_to_deadline_delta_{};
|
|
int64_t frame_time_to_now_delta_{};
|
|
int64_t frame_time_to_deadline_delta_{};
|
|
int64_t now_{};
|
|
int64_t frame_time_{};
|
|
int64_t deadline_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<8> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ChromeCompositorStateMachine : public ::protozero::CppMessageObj {
|
|
public:
|
|
using MajorState = ChromeCompositorStateMachine_MajorState;
|
|
using MinorState = ChromeCompositorStateMachine_MinorState;
|
|
enum FieldNumbers {
|
|
kMajorStateFieldNumber = 1,
|
|
kMinorStateFieldNumber = 2,
|
|
};
|
|
|
|
ChromeCompositorStateMachine();
|
|
~ChromeCompositorStateMachine() override;
|
|
ChromeCompositorStateMachine(ChromeCompositorStateMachine&&) noexcept;
|
|
ChromeCompositorStateMachine& operator=(ChromeCompositorStateMachine&&);
|
|
ChromeCompositorStateMachine(const ChromeCompositorStateMachine&);
|
|
ChromeCompositorStateMachine& operator=(const ChromeCompositorStateMachine&);
|
|
bool operator==(const ChromeCompositorStateMachine&) const;
|
|
bool operator!=(const ChromeCompositorStateMachine& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_major_state() const { return _has_field_[1]; }
|
|
const ChromeCompositorStateMachine_MajorState& major_state() const { return *major_state_; }
|
|
ChromeCompositorStateMachine_MajorState* mutable_major_state() { _has_field_.set(1); return major_state_.get(); }
|
|
|
|
bool has_minor_state() const { return _has_field_[2]; }
|
|
const ChromeCompositorStateMachine_MinorState& minor_state() const { return *minor_state_; }
|
|
ChromeCompositorStateMachine_MinorState* mutable_minor_state() { _has_field_.set(2); return minor_state_.get(); }
|
|
|
|
private:
|
|
::protozero::CopyablePtr<ChromeCompositorStateMachine_MajorState> major_state_;
|
|
::protozero::CopyablePtr<ChromeCompositorStateMachine_MinorState> minor_state_;
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ChromeCompositorStateMachine_MinorState : public ::protozero::CppMessageObj {
|
|
public:
|
|
using TreePriority = ChromeCompositorStateMachine_MinorState_TreePriority;
|
|
static constexpr auto TREE_PRIORITY_UNSPECIFIED = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED;
|
|
static constexpr auto TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES;
|
|
static constexpr auto TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY;
|
|
static constexpr auto TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY;
|
|
static constexpr auto TreePriority_MIN = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED;
|
|
static constexpr auto TreePriority_MAX = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY;
|
|
using ScrollHandlerState = ChromeCompositorStateMachine_MinorState_ScrollHandlerState;
|
|
static constexpr auto SCROLL_HANDLER_UNSPECIFIED = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED;
|
|
static constexpr auto SCROLL_AFFECTS_SCROLL_HANDLER = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_AFFECTS_SCROLL_HANDLER;
|
|
static constexpr auto SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER;
|
|
static constexpr auto ScrollHandlerState_MIN = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED;
|
|
static constexpr auto ScrollHandlerState_MAX = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER;
|
|
enum FieldNumbers {
|
|
kCommitCountFieldNumber = 1,
|
|
kCurrentFrameNumberFieldNumber = 2,
|
|
kLastFrameNumberSubmitPerformedFieldNumber = 3,
|
|
kLastFrameNumberDrawPerformedFieldNumber = 4,
|
|
kLastFrameNumberBeginMainFrameSentFieldNumber = 5,
|
|
kDidDrawFieldNumber = 6,
|
|
kDidSendBeginMainFrameForCurrentFrameFieldNumber = 7,
|
|
kDidNotifyBeginMainFrameNotExpectedUntilFieldNumber = 8,
|
|
kDidNotifyBeginMainFrameNotExpectedSoonFieldNumber = 9,
|
|
kWantsBeginMainFrameNotExpectedFieldNumber = 10,
|
|
kDidCommitDuringFrameFieldNumber = 11,
|
|
kDidInvalidateLayerTreeFrameSinkFieldNumber = 12,
|
|
kDidPerformImplSideInvalidaionFieldNumber = 13,
|
|
kDidPrepareTilesFieldNumber = 14,
|
|
kConsecutiveCheckerboardAnimationsFieldNumber = 15,
|
|
kPendingSubmitFramesFieldNumber = 16,
|
|
kSubmitFramesWithCurrentLayerTreeFrameSinkFieldNumber = 17,
|
|
kNeedsRedrawFieldNumber = 18,
|
|
kNeedsPrepareTilesFieldNumber = 19,
|
|
kNeedsBeginMainFrameFieldNumber = 20,
|
|
kNeedsOneBeginImplFrameFieldNumber = 21,
|
|
kVisibleFieldNumber = 22,
|
|
kBeginFrameSourcePausedFieldNumber = 23,
|
|
kCanDrawFieldNumber = 24,
|
|
kResourcelessDrawFieldNumber = 25,
|
|
kHasPendingTreeFieldNumber = 26,
|
|
kPendingTreeIsReadyForActivationFieldNumber = 27,
|
|
kActiveTreeNeedsFirstDrawFieldNumber = 28,
|
|
kActiveTreeIsReadyToDrawFieldNumber = 29,
|
|
kDidCreateAndInitializeFirstLayerTreeFrameSinkFieldNumber = 30,
|
|
kTreePriorityFieldNumber = 31,
|
|
kScrollHandlerStateFieldNumber = 32,
|
|
kCriticalBeginMainFrameToActivateIsFastFieldNumber = 33,
|
|
kMainThreadMissedLastDeadlineFieldNumber = 34,
|
|
kSkipNextBeginMainFrameToReduceLatencyFieldNumber = 35,
|
|
kVideoNeedsBeginFramesFieldNumber = 36,
|
|
kDeferBeginMainFrameFieldNumber = 37,
|
|
kLastCommitHadNoUpdatesFieldNumber = 38,
|
|
kDidDrawInLastFrameFieldNumber = 39,
|
|
kDidSubmitInLastFrameFieldNumber = 40,
|
|
kNeedsImplSideInvalidationFieldNumber = 41,
|
|
kCurrentPendingTreeIsImplSideFieldNumber = 42,
|
|
kPreviousPendingTreeWasImplSideFieldNumber = 43,
|
|
kProcessingAnimationWorkletsForActiveTreeFieldNumber = 44,
|
|
kProcessingAnimationWorkletsForPendingTreeFieldNumber = 45,
|
|
kProcessingPaintWorkletsForPendingTreeFieldNumber = 46,
|
|
};
|
|
|
|
ChromeCompositorStateMachine_MinorState();
|
|
~ChromeCompositorStateMachine_MinorState() override;
|
|
ChromeCompositorStateMachine_MinorState(ChromeCompositorStateMachine_MinorState&&) noexcept;
|
|
ChromeCompositorStateMachine_MinorState& operator=(ChromeCompositorStateMachine_MinorState&&);
|
|
ChromeCompositorStateMachine_MinorState(const ChromeCompositorStateMachine_MinorState&);
|
|
ChromeCompositorStateMachine_MinorState& operator=(const ChromeCompositorStateMachine_MinorState&);
|
|
bool operator==(const ChromeCompositorStateMachine_MinorState&) const;
|
|
bool operator!=(const ChromeCompositorStateMachine_MinorState& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_commit_count() const { return _has_field_[1]; }
|
|
int32_t commit_count() const { return commit_count_; }
|
|
void set_commit_count(int32_t value) { commit_count_ = value; _has_field_.set(1); }
|
|
|
|
bool has_current_frame_number() const { return _has_field_[2]; }
|
|
int32_t current_frame_number() const { return current_frame_number_; }
|
|
void set_current_frame_number(int32_t value) { current_frame_number_ = value; _has_field_.set(2); }
|
|
|
|
bool has_last_frame_number_submit_performed() const { return _has_field_[3]; }
|
|
int32_t last_frame_number_submit_performed() const { return last_frame_number_submit_performed_; }
|
|
void set_last_frame_number_submit_performed(int32_t value) { last_frame_number_submit_performed_ = value; _has_field_.set(3); }
|
|
|
|
bool has_last_frame_number_draw_performed() const { return _has_field_[4]; }
|
|
int32_t last_frame_number_draw_performed() const { return last_frame_number_draw_performed_; }
|
|
void set_last_frame_number_draw_performed(int32_t value) { last_frame_number_draw_performed_ = value; _has_field_.set(4); }
|
|
|
|
bool has_last_frame_number_begin_main_frame_sent() const { return _has_field_[5]; }
|
|
int32_t last_frame_number_begin_main_frame_sent() const { return last_frame_number_begin_main_frame_sent_; }
|
|
void set_last_frame_number_begin_main_frame_sent(int32_t value) { last_frame_number_begin_main_frame_sent_ = value; _has_field_.set(5); }
|
|
|
|
bool has_did_draw() const { return _has_field_[6]; }
|
|
bool did_draw() const { return did_draw_; }
|
|
void set_did_draw(bool value) { did_draw_ = value; _has_field_.set(6); }
|
|
|
|
bool has_did_send_begin_main_frame_for_current_frame() const { return _has_field_[7]; }
|
|
bool did_send_begin_main_frame_for_current_frame() const { return did_send_begin_main_frame_for_current_frame_; }
|
|
void set_did_send_begin_main_frame_for_current_frame(bool value) { did_send_begin_main_frame_for_current_frame_ = value; _has_field_.set(7); }
|
|
|
|
bool has_did_notify_begin_main_frame_not_expected_until() const { return _has_field_[8]; }
|
|
bool did_notify_begin_main_frame_not_expected_until() const { return did_notify_begin_main_frame_not_expected_until_; }
|
|
void set_did_notify_begin_main_frame_not_expected_until(bool value) { did_notify_begin_main_frame_not_expected_until_ = value; _has_field_.set(8); }
|
|
|
|
bool has_did_notify_begin_main_frame_not_expected_soon() const { return _has_field_[9]; }
|
|
bool did_notify_begin_main_frame_not_expected_soon() const { return did_notify_begin_main_frame_not_expected_soon_; }
|
|
void set_did_notify_begin_main_frame_not_expected_soon(bool value) { did_notify_begin_main_frame_not_expected_soon_ = value; _has_field_.set(9); }
|
|
|
|
bool has_wants_begin_main_frame_not_expected() const { return _has_field_[10]; }
|
|
bool wants_begin_main_frame_not_expected() const { return wants_begin_main_frame_not_expected_; }
|
|
void set_wants_begin_main_frame_not_expected(bool value) { wants_begin_main_frame_not_expected_ = value; _has_field_.set(10); }
|
|
|
|
bool has_did_commit_during_frame() const { return _has_field_[11]; }
|
|
bool did_commit_during_frame() const { return did_commit_during_frame_; }
|
|
void set_did_commit_during_frame(bool value) { did_commit_during_frame_ = value; _has_field_.set(11); }
|
|
|
|
bool has_did_invalidate_layer_tree_frame_sink() const { return _has_field_[12]; }
|
|
bool did_invalidate_layer_tree_frame_sink() const { return did_invalidate_layer_tree_frame_sink_; }
|
|
void set_did_invalidate_layer_tree_frame_sink(bool value) { did_invalidate_layer_tree_frame_sink_ = value; _has_field_.set(12); }
|
|
|
|
bool has_did_perform_impl_side_invalidaion() const { return _has_field_[13]; }
|
|
bool did_perform_impl_side_invalidaion() const { return did_perform_impl_side_invalidaion_; }
|
|
void set_did_perform_impl_side_invalidaion(bool value) { did_perform_impl_side_invalidaion_ = value; _has_field_.set(13); }
|
|
|
|
bool has_did_prepare_tiles() const { return _has_field_[14]; }
|
|
bool did_prepare_tiles() const { return did_prepare_tiles_; }
|
|
void set_did_prepare_tiles(bool value) { did_prepare_tiles_ = value; _has_field_.set(14); }
|
|
|
|
bool has_consecutive_checkerboard_animations() const { return _has_field_[15]; }
|
|
int32_t consecutive_checkerboard_animations() const { return consecutive_checkerboard_animations_; }
|
|
void set_consecutive_checkerboard_animations(int32_t value) { consecutive_checkerboard_animations_ = value; _has_field_.set(15); }
|
|
|
|
bool has_pending_submit_frames() const { return _has_field_[16]; }
|
|
int32_t pending_submit_frames() const { return pending_submit_frames_; }
|
|
void set_pending_submit_frames(int32_t value) { pending_submit_frames_ = value; _has_field_.set(16); }
|
|
|
|
bool has_submit_frames_with_current_layer_tree_frame_sink() const { return _has_field_[17]; }
|
|
int32_t submit_frames_with_current_layer_tree_frame_sink() const { return submit_frames_with_current_layer_tree_frame_sink_; }
|
|
void set_submit_frames_with_current_layer_tree_frame_sink(int32_t value) { submit_frames_with_current_layer_tree_frame_sink_ = value; _has_field_.set(17); }
|
|
|
|
bool has_needs_redraw() const { return _has_field_[18]; }
|
|
bool needs_redraw() const { return needs_redraw_; }
|
|
void set_needs_redraw(bool value) { needs_redraw_ = value; _has_field_.set(18); }
|
|
|
|
bool has_needs_prepare_tiles() const { return _has_field_[19]; }
|
|
bool needs_prepare_tiles() const { return needs_prepare_tiles_; }
|
|
void set_needs_prepare_tiles(bool value) { needs_prepare_tiles_ = value; _has_field_.set(19); }
|
|
|
|
bool has_needs_begin_main_frame() const { return _has_field_[20]; }
|
|
bool needs_begin_main_frame() const { return needs_begin_main_frame_; }
|
|
void set_needs_begin_main_frame(bool value) { needs_begin_main_frame_ = value; _has_field_.set(20); }
|
|
|
|
bool has_needs_one_begin_impl_frame() const { return _has_field_[21]; }
|
|
bool needs_one_begin_impl_frame() const { return needs_one_begin_impl_frame_; }
|
|
void set_needs_one_begin_impl_frame(bool value) { needs_one_begin_impl_frame_ = value; _has_field_.set(21); }
|
|
|
|
bool has_visible() const { return _has_field_[22]; }
|
|
bool visible() const { return visible_; }
|
|
void set_visible(bool value) { visible_ = value; _has_field_.set(22); }
|
|
|
|
bool has_begin_frame_source_paused() const { return _has_field_[23]; }
|
|
bool begin_frame_source_paused() const { return begin_frame_source_paused_; }
|
|
void set_begin_frame_source_paused(bool value) { begin_frame_source_paused_ = value; _has_field_.set(23); }
|
|
|
|
bool has_can_draw() const { return _has_field_[24]; }
|
|
bool can_draw() const { return can_draw_; }
|
|
void set_can_draw(bool value) { can_draw_ = value; _has_field_.set(24); }
|
|
|
|
bool has_resourceless_draw() const { return _has_field_[25]; }
|
|
bool resourceless_draw() const { return resourceless_draw_; }
|
|
void set_resourceless_draw(bool value) { resourceless_draw_ = value; _has_field_.set(25); }
|
|
|
|
bool has_has_pending_tree() const { return _has_field_[26]; }
|
|
bool has_pending_tree() const { return has_pending_tree_; }
|
|
void set_has_pending_tree(bool value) { has_pending_tree_ = value; _has_field_.set(26); }
|
|
|
|
bool has_pending_tree_is_ready_for_activation() const { return _has_field_[27]; }
|
|
bool pending_tree_is_ready_for_activation() const { return pending_tree_is_ready_for_activation_; }
|
|
void set_pending_tree_is_ready_for_activation(bool value) { pending_tree_is_ready_for_activation_ = value; _has_field_.set(27); }
|
|
|
|
bool has_active_tree_needs_first_draw() const { return _has_field_[28]; }
|
|
bool active_tree_needs_first_draw() const { return active_tree_needs_first_draw_; }
|
|
void set_active_tree_needs_first_draw(bool value) { active_tree_needs_first_draw_ = value; _has_field_.set(28); }
|
|
|
|
bool has_active_tree_is_ready_to_draw() const { return _has_field_[29]; }
|
|
bool active_tree_is_ready_to_draw() const { return active_tree_is_ready_to_draw_; }
|
|
void set_active_tree_is_ready_to_draw(bool value) { active_tree_is_ready_to_draw_ = value; _has_field_.set(29); }
|
|
|
|
bool has_did_create_and_initialize_first_layer_tree_frame_sink() const { return _has_field_[30]; }
|
|
bool did_create_and_initialize_first_layer_tree_frame_sink() const { return did_create_and_initialize_first_layer_tree_frame_sink_; }
|
|
void set_did_create_and_initialize_first_layer_tree_frame_sink(bool value) { did_create_and_initialize_first_layer_tree_frame_sink_ = value; _has_field_.set(30); }
|
|
|
|
bool has_tree_priority() const { return _has_field_[31]; }
|
|
ChromeCompositorStateMachine_MinorState_TreePriority tree_priority() const { return tree_priority_; }
|
|
void set_tree_priority(ChromeCompositorStateMachine_MinorState_TreePriority value) { tree_priority_ = value; _has_field_.set(31); }
|
|
|
|
bool has_scroll_handler_state() const { return _has_field_[32]; }
|
|
ChromeCompositorStateMachine_MinorState_ScrollHandlerState scroll_handler_state() const { return scroll_handler_state_; }
|
|
void set_scroll_handler_state(ChromeCompositorStateMachine_MinorState_ScrollHandlerState value) { scroll_handler_state_ = value; _has_field_.set(32); }
|
|
|
|
bool has_critical_begin_main_frame_to_activate_is_fast() const { return _has_field_[33]; }
|
|
bool critical_begin_main_frame_to_activate_is_fast() const { return critical_begin_main_frame_to_activate_is_fast_; }
|
|
void set_critical_begin_main_frame_to_activate_is_fast(bool value) { critical_begin_main_frame_to_activate_is_fast_ = value; _has_field_.set(33); }
|
|
|
|
bool has_main_thread_missed_last_deadline() const { return _has_field_[34]; }
|
|
bool main_thread_missed_last_deadline() const { return main_thread_missed_last_deadline_; }
|
|
void set_main_thread_missed_last_deadline(bool value) { main_thread_missed_last_deadline_ = value; _has_field_.set(34); }
|
|
|
|
bool has_skip_next_begin_main_frame_to_reduce_latency() const { return _has_field_[35]; }
|
|
bool skip_next_begin_main_frame_to_reduce_latency() const { return skip_next_begin_main_frame_to_reduce_latency_; }
|
|
void set_skip_next_begin_main_frame_to_reduce_latency(bool value) { skip_next_begin_main_frame_to_reduce_latency_ = value; _has_field_.set(35); }
|
|
|
|
bool has_video_needs_begin_frames() const { return _has_field_[36]; }
|
|
bool video_needs_begin_frames() const { return video_needs_begin_frames_; }
|
|
void set_video_needs_begin_frames(bool value) { video_needs_begin_frames_ = value; _has_field_.set(36); }
|
|
|
|
bool has_defer_begin_main_frame() const { return _has_field_[37]; }
|
|
bool defer_begin_main_frame() const { return defer_begin_main_frame_; }
|
|
void set_defer_begin_main_frame(bool value) { defer_begin_main_frame_ = value; _has_field_.set(37); }
|
|
|
|
bool has_last_commit_had_no_updates() const { return _has_field_[38]; }
|
|
bool last_commit_had_no_updates() const { return last_commit_had_no_updates_; }
|
|
void set_last_commit_had_no_updates(bool value) { last_commit_had_no_updates_ = value; _has_field_.set(38); }
|
|
|
|
bool has_did_draw_in_last_frame() const { return _has_field_[39]; }
|
|
bool did_draw_in_last_frame() const { return did_draw_in_last_frame_; }
|
|
void set_did_draw_in_last_frame(bool value) { did_draw_in_last_frame_ = value; _has_field_.set(39); }
|
|
|
|
bool has_did_submit_in_last_frame() const { return _has_field_[40]; }
|
|
bool did_submit_in_last_frame() const { return did_submit_in_last_frame_; }
|
|
void set_did_submit_in_last_frame(bool value) { did_submit_in_last_frame_ = value; _has_field_.set(40); }
|
|
|
|
bool has_needs_impl_side_invalidation() const { return _has_field_[41]; }
|
|
bool needs_impl_side_invalidation() const { return needs_impl_side_invalidation_; }
|
|
void set_needs_impl_side_invalidation(bool value) { needs_impl_side_invalidation_ = value; _has_field_.set(41); }
|
|
|
|
bool has_current_pending_tree_is_impl_side() const { return _has_field_[42]; }
|
|
bool current_pending_tree_is_impl_side() const { return current_pending_tree_is_impl_side_; }
|
|
void set_current_pending_tree_is_impl_side(bool value) { current_pending_tree_is_impl_side_ = value; _has_field_.set(42); }
|
|
|
|
bool has_previous_pending_tree_was_impl_side() const { return _has_field_[43]; }
|
|
bool previous_pending_tree_was_impl_side() const { return previous_pending_tree_was_impl_side_; }
|
|
void set_previous_pending_tree_was_impl_side(bool value) { previous_pending_tree_was_impl_side_ = value; _has_field_.set(43); }
|
|
|
|
bool has_processing_animation_worklets_for_active_tree() const { return _has_field_[44]; }
|
|
bool processing_animation_worklets_for_active_tree() const { return processing_animation_worklets_for_active_tree_; }
|
|
void set_processing_animation_worklets_for_active_tree(bool value) { processing_animation_worklets_for_active_tree_ = value; _has_field_.set(44); }
|
|
|
|
bool has_processing_animation_worklets_for_pending_tree() const { return _has_field_[45]; }
|
|
bool processing_animation_worklets_for_pending_tree() const { return processing_animation_worklets_for_pending_tree_; }
|
|
void set_processing_animation_worklets_for_pending_tree(bool value) { processing_animation_worklets_for_pending_tree_ = value; _has_field_.set(45); }
|
|
|
|
bool has_processing_paint_worklets_for_pending_tree() const { return _has_field_[46]; }
|
|
bool processing_paint_worklets_for_pending_tree() const { return processing_paint_worklets_for_pending_tree_; }
|
|
void set_processing_paint_worklets_for_pending_tree(bool value) { processing_paint_worklets_for_pending_tree_ = value; _has_field_.set(46); }
|
|
|
|
private:
|
|
int32_t commit_count_{};
|
|
int32_t current_frame_number_{};
|
|
int32_t last_frame_number_submit_performed_{};
|
|
int32_t last_frame_number_draw_performed_{};
|
|
int32_t last_frame_number_begin_main_frame_sent_{};
|
|
bool did_draw_{};
|
|
bool did_send_begin_main_frame_for_current_frame_{};
|
|
bool did_notify_begin_main_frame_not_expected_until_{};
|
|
bool did_notify_begin_main_frame_not_expected_soon_{};
|
|
bool wants_begin_main_frame_not_expected_{};
|
|
bool did_commit_during_frame_{};
|
|
bool did_invalidate_layer_tree_frame_sink_{};
|
|
bool did_perform_impl_side_invalidaion_{};
|
|
bool did_prepare_tiles_{};
|
|
int32_t consecutive_checkerboard_animations_{};
|
|
int32_t pending_submit_frames_{};
|
|
int32_t submit_frames_with_current_layer_tree_frame_sink_{};
|
|
bool needs_redraw_{};
|
|
bool needs_prepare_tiles_{};
|
|
bool needs_begin_main_frame_{};
|
|
bool needs_one_begin_impl_frame_{};
|
|
bool visible_{};
|
|
bool begin_frame_source_paused_{};
|
|
bool can_draw_{};
|
|
bool resourceless_draw_{};
|
|
bool has_pending_tree_{};
|
|
bool pending_tree_is_ready_for_activation_{};
|
|
bool active_tree_needs_first_draw_{};
|
|
bool active_tree_is_ready_to_draw_{};
|
|
bool did_create_and_initialize_first_layer_tree_frame_sink_{};
|
|
ChromeCompositorStateMachine_MinorState_TreePriority tree_priority_{};
|
|
ChromeCompositorStateMachine_MinorState_ScrollHandlerState scroll_handler_state_{};
|
|
bool critical_begin_main_frame_to_activate_is_fast_{};
|
|
bool main_thread_missed_last_deadline_{};
|
|
bool skip_next_begin_main_frame_to_reduce_latency_{};
|
|
bool video_needs_begin_frames_{};
|
|
bool defer_begin_main_frame_{};
|
|
bool last_commit_had_no_updates_{};
|
|
bool did_draw_in_last_frame_{};
|
|
bool did_submit_in_last_frame_{};
|
|
bool needs_impl_side_invalidation_{};
|
|
bool current_pending_tree_is_impl_side_{};
|
|
bool previous_pending_tree_was_impl_side_{};
|
|
bool processing_animation_worklets_for_active_tree_{};
|
|
bool processing_animation_worklets_for_pending_tree_{};
|
|
bool processing_paint_worklets_for_pending_tree_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<47> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ChromeCompositorStateMachine_MajorState : public ::protozero::CppMessageObj {
|
|
public:
|
|
using BeginImplFrameState = ChromeCompositorStateMachine_MajorState_BeginImplFrameState;
|
|
static constexpr auto BEGIN_IMPL_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED;
|
|
static constexpr auto BEGIN_IMPL_FRAME_IDLE = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_IDLE;
|
|
static constexpr auto BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME;
|
|
static constexpr auto BEGIN_IMPL_FRAME_INSIDE_DEADLINE = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE;
|
|
static constexpr auto BeginImplFrameState_MIN = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED;
|
|
static constexpr auto BeginImplFrameState_MAX = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE;
|
|
using BeginMainFrameState = ChromeCompositorStateMachine_MajorState_BeginMainFrameState;
|
|
static constexpr auto BEGIN_MAIN_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED;
|
|
static constexpr auto BEGIN_MAIN_FRAME_IDLE = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_IDLE;
|
|
static constexpr auto BEGIN_MAIN_FRAME_SENT = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_SENT;
|
|
static constexpr auto BEGIN_MAIN_FRAME_READY_TO_COMMIT = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT;
|
|
static constexpr auto BeginMainFrameState_MIN = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED;
|
|
static constexpr auto BeginMainFrameState_MAX = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT;
|
|
using LayerTreeFrameSinkState = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState;
|
|
static constexpr auto LAYER_TREE_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED;
|
|
static constexpr auto LAYER_TREE_FRAME_NONE = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_NONE;
|
|
static constexpr auto LAYER_TREE_FRAME_ACTIVE = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_ACTIVE;
|
|
static constexpr auto LAYER_TREE_FRAME_CREATING = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_CREATING;
|
|
static constexpr auto LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT;
|
|
static constexpr auto LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION;
|
|
static constexpr auto LayerTreeFrameSinkState_MIN = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED;
|
|
static constexpr auto LayerTreeFrameSinkState_MAX = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION;
|
|
using ForcedRedrawOnTimeoutState = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState;
|
|
static constexpr auto FORCED_REDRAW_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED;
|
|
static constexpr auto FORCED_REDRAW_IDLE = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_IDLE;
|
|
static constexpr auto FORCED_REDRAW_WAITING_FOR_COMMIT = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_COMMIT;
|
|
static constexpr auto FORCED_REDRAW_WAITING_FOR_ACTIVATION = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_ACTIVATION;
|
|
static constexpr auto FORCED_REDRAW_WAITING_FOR_DRAW = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW;
|
|
static constexpr auto ForcedRedrawOnTimeoutState_MIN = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED;
|
|
static constexpr auto ForcedRedrawOnTimeoutState_MAX = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW;
|
|
enum FieldNumbers {
|
|
kNextActionFieldNumber = 1,
|
|
kBeginImplFrameStateFieldNumber = 2,
|
|
kBeginMainFrameStateFieldNumber = 3,
|
|
kLayerTreeFrameSinkStateFieldNumber = 4,
|
|
kForcedRedrawStateFieldNumber = 5,
|
|
};
|
|
|
|
ChromeCompositorStateMachine_MajorState();
|
|
~ChromeCompositorStateMachine_MajorState() override;
|
|
ChromeCompositorStateMachine_MajorState(ChromeCompositorStateMachine_MajorState&&) noexcept;
|
|
ChromeCompositorStateMachine_MajorState& operator=(ChromeCompositorStateMachine_MajorState&&);
|
|
ChromeCompositorStateMachine_MajorState(const ChromeCompositorStateMachine_MajorState&);
|
|
ChromeCompositorStateMachine_MajorState& operator=(const ChromeCompositorStateMachine_MajorState&);
|
|
bool operator==(const ChromeCompositorStateMachine_MajorState&) const;
|
|
bool operator!=(const ChromeCompositorStateMachine_MajorState& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_next_action() const { return _has_field_[1]; }
|
|
ChromeCompositorSchedulerAction next_action() const { return next_action_; }
|
|
void set_next_action(ChromeCompositorSchedulerAction value) { next_action_ = value; _has_field_.set(1); }
|
|
|
|
bool has_begin_impl_frame_state() const { return _has_field_[2]; }
|
|
ChromeCompositorStateMachine_MajorState_BeginImplFrameState begin_impl_frame_state() const { return begin_impl_frame_state_; }
|
|
void set_begin_impl_frame_state(ChromeCompositorStateMachine_MajorState_BeginImplFrameState value) { begin_impl_frame_state_ = value; _has_field_.set(2); }
|
|
|
|
bool has_begin_main_frame_state() const { return _has_field_[3]; }
|
|
ChromeCompositorStateMachine_MajorState_BeginMainFrameState begin_main_frame_state() const { return begin_main_frame_state_; }
|
|
void set_begin_main_frame_state(ChromeCompositorStateMachine_MajorState_BeginMainFrameState value) { begin_main_frame_state_ = value; _has_field_.set(3); }
|
|
|
|
bool has_layer_tree_frame_sink_state() const { return _has_field_[4]; }
|
|
ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState layer_tree_frame_sink_state() const { return layer_tree_frame_sink_state_; }
|
|
void set_layer_tree_frame_sink_state(ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState value) { layer_tree_frame_sink_state_ = value; _has_field_.set(4); }
|
|
|
|
bool has_forced_redraw_state() const { return _has_field_[5]; }
|
|
ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState forced_redraw_state() const { return forced_redraw_state_; }
|
|
void set_forced_redraw_state(ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState value) { forced_redraw_state_ = value; _has_field_.set(5); }
|
|
|
|
private:
|
|
ChromeCompositorSchedulerAction next_action_{};
|
|
ChromeCompositorStateMachine_MajorState_BeginImplFrameState begin_impl_frame_state_{};
|
|
ChromeCompositorStateMachine_MajorState_BeginMainFrameState begin_main_frame_state_{};
|
|
ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState layer_tree_frame_sink_state_{};
|
|
ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState forced_redraw_state_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<6> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ChromeCompositorSchedulerState : public ::protozero::CppMessageObj {
|
|
public:
|
|
using BeginImplFrameDeadlineMode = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode;
|
|
static constexpr auto DEADLINE_MODE_UNSPECIFIED = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED;
|
|
static constexpr auto DEADLINE_MODE_NONE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_NONE;
|
|
static constexpr auto DEADLINE_MODE_IMMEDIATE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_IMMEDIATE;
|
|
static constexpr auto DEADLINE_MODE_REGULAR = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_REGULAR;
|
|
static constexpr auto DEADLINE_MODE_LATE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_LATE;
|
|
static constexpr auto DEADLINE_MODE_BLOCKED = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED;
|
|
static constexpr auto BeginImplFrameDeadlineMode_MIN = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED;
|
|
static constexpr auto BeginImplFrameDeadlineMode_MAX = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED;
|
|
enum FieldNumbers {
|
|
kStateMachineFieldNumber = 1,
|
|
kObservingBeginFrameSourceFieldNumber = 2,
|
|
kBeginImplFrameDeadlineTaskFieldNumber = 3,
|
|
kPendingBeginFrameTaskFieldNumber = 4,
|
|
kSkippedLastFrameMissedExceededDeadlineFieldNumber = 5,
|
|
kSkippedLastFrameToReduceLatencyFieldNumber = 6,
|
|
kInsideActionFieldNumber = 7,
|
|
kDeadlineModeFieldNumber = 8,
|
|
kDeadlineUsFieldNumber = 9,
|
|
kDeadlineScheduledAtUsFieldNumber = 10,
|
|
kNowUsFieldNumber = 11,
|
|
kNowToDeadlineDeltaUsFieldNumber = 12,
|
|
kNowToDeadlineScheduledAtDeltaUsFieldNumber = 13,
|
|
kBeginImplFrameArgsFieldNumber = 14,
|
|
kBeginFrameObserverStateFieldNumber = 15,
|
|
kBeginFrameSourceStateFieldNumber = 16,
|
|
kCompositorTimingHistoryFieldNumber = 17,
|
|
};
|
|
|
|
ChromeCompositorSchedulerState();
|
|
~ChromeCompositorSchedulerState() override;
|
|
ChromeCompositorSchedulerState(ChromeCompositorSchedulerState&&) noexcept;
|
|
ChromeCompositorSchedulerState& operator=(ChromeCompositorSchedulerState&&);
|
|
ChromeCompositorSchedulerState(const ChromeCompositorSchedulerState&);
|
|
ChromeCompositorSchedulerState& operator=(const ChromeCompositorSchedulerState&);
|
|
bool operator==(const ChromeCompositorSchedulerState&) const;
|
|
bool operator!=(const ChromeCompositorSchedulerState& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_state_machine() const { return _has_field_[1]; }
|
|
const ChromeCompositorStateMachine& state_machine() const { return *state_machine_; }
|
|
ChromeCompositorStateMachine* mutable_state_machine() { _has_field_.set(1); return state_machine_.get(); }
|
|
|
|
bool has_observing_begin_frame_source() const { return _has_field_[2]; }
|
|
bool observing_begin_frame_source() const { return observing_begin_frame_source_; }
|
|
void set_observing_begin_frame_source(bool value) { observing_begin_frame_source_ = value; _has_field_.set(2); }
|
|
|
|
bool has_begin_impl_frame_deadline_task() const { return _has_field_[3]; }
|
|
bool begin_impl_frame_deadline_task() const { return begin_impl_frame_deadline_task_; }
|
|
void set_begin_impl_frame_deadline_task(bool value) { begin_impl_frame_deadline_task_ = value; _has_field_.set(3); }
|
|
|
|
bool has_pending_begin_frame_task() const { return _has_field_[4]; }
|
|
bool pending_begin_frame_task() const { return pending_begin_frame_task_; }
|
|
void set_pending_begin_frame_task(bool value) { pending_begin_frame_task_ = value; _has_field_.set(4); }
|
|
|
|
bool has_skipped_last_frame_missed_exceeded_deadline() const { return _has_field_[5]; }
|
|
bool skipped_last_frame_missed_exceeded_deadline() const { return skipped_last_frame_missed_exceeded_deadline_; }
|
|
void set_skipped_last_frame_missed_exceeded_deadline(bool value) { skipped_last_frame_missed_exceeded_deadline_ = value; _has_field_.set(5); }
|
|
|
|
bool has_skipped_last_frame_to_reduce_latency() const { return _has_field_[6]; }
|
|
bool skipped_last_frame_to_reduce_latency() const { return skipped_last_frame_to_reduce_latency_; }
|
|
void set_skipped_last_frame_to_reduce_latency(bool value) { skipped_last_frame_to_reduce_latency_ = value; _has_field_.set(6); }
|
|
|
|
bool has_inside_action() const { return _has_field_[7]; }
|
|
ChromeCompositorSchedulerAction inside_action() const { return inside_action_; }
|
|
void set_inside_action(ChromeCompositorSchedulerAction value) { inside_action_ = value; _has_field_.set(7); }
|
|
|
|
bool has_deadline_mode() const { return _has_field_[8]; }
|
|
ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode deadline_mode() const { return deadline_mode_; }
|
|
void set_deadline_mode(ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode value) { deadline_mode_ = value; _has_field_.set(8); }
|
|
|
|
bool has_deadline_us() const { return _has_field_[9]; }
|
|
int64_t deadline_us() const { return deadline_us_; }
|
|
void set_deadline_us(int64_t value) { deadline_us_ = value; _has_field_.set(9); }
|
|
|
|
bool has_deadline_scheduled_at_us() const { return _has_field_[10]; }
|
|
int64_t deadline_scheduled_at_us() const { return deadline_scheduled_at_us_; }
|
|
void set_deadline_scheduled_at_us(int64_t value) { deadline_scheduled_at_us_ = value; _has_field_.set(10); }
|
|
|
|
bool has_now_us() const { return _has_field_[11]; }
|
|
int64_t now_us() const { return now_us_; }
|
|
void set_now_us(int64_t value) { now_us_ = value; _has_field_.set(11); }
|
|
|
|
bool has_now_to_deadline_delta_us() const { return _has_field_[12]; }
|
|
int64_t now_to_deadline_delta_us() const { return now_to_deadline_delta_us_; }
|
|
void set_now_to_deadline_delta_us(int64_t value) { now_to_deadline_delta_us_ = value; _has_field_.set(12); }
|
|
|
|
bool has_now_to_deadline_scheduled_at_delta_us() const { return _has_field_[13]; }
|
|
int64_t now_to_deadline_scheduled_at_delta_us() const { return now_to_deadline_scheduled_at_delta_us_; }
|
|
void set_now_to_deadline_scheduled_at_delta_us(int64_t value) { now_to_deadline_scheduled_at_delta_us_ = value; _has_field_.set(13); }
|
|
|
|
bool has_begin_impl_frame_args() const { return _has_field_[14]; }
|
|
const BeginImplFrameArgs& begin_impl_frame_args() const { return *begin_impl_frame_args_; }
|
|
BeginImplFrameArgs* mutable_begin_impl_frame_args() { _has_field_.set(14); return begin_impl_frame_args_.get(); }
|
|
|
|
bool has_begin_frame_observer_state() const { return _has_field_[15]; }
|
|
const BeginFrameObserverState& begin_frame_observer_state() const { return *begin_frame_observer_state_; }
|
|
BeginFrameObserverState* mutable_begin_frame_observer_state() { _has_field_.set(15); return begin_frame_observer_state_.get(); }
|
|
|
|
bool has_begin_frame_source_state() const { return _has_field_[16]; }
|
|
const BeginFrameSourceState& begin_frame_source_state() const { return *begin_frame_source_state_; }
|
|
BeginFrameSourceState* mutable_begin_frame_source_state() { _has_field_.set(16); return begin_frame_source_state_.get(); }
|
|
|
|
bool has_compositor_timing_history() const { return _has_field_[17]; }
|
|
const CompositorTimingHistory& compositor_timing_history() const { return *compositor_timing_history_; }
|
|
CompositorTimingHistory* mutable_compositor_timing_history() { _has_field_.set(17); return compositor_timing_history_.get(); }
|
|
|
|
private:
|
|
::protozero::CopyablePtr<ChromeCompositorStateMachine> state_machine_;
|
|
bool observing_begin_frame_source_{};
|
|
bool begin_impl_frame_deadline_task_{};
|
|
bool pending_begin_frame_task_{};
|
|
bool skipped_last_frame_missed_exceeded_deadline_{};
|
|
bool skipped_last_frame_to_reduce_latency_{};
|
|
ChromeCompositorSchedulerAction inside_action_{};
|
|
ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode deadline_mode_{};
|
|
int64_t deadline_us_{};
|
|
int64_t deadline_scheduled_at_us_{};
|
|
int64_t now_us_{};
|
|
int64_t now_to_deadline_delta_us_{};
|
|
int64_t now_to_deadline_scheduled_at_delta_us_{};
|
|
::protozero::CopyablePtr<BeginImplFrameArgs> begin_impl_frame_args_;
|
|
::protozero::CopyablePtr<BeginFrameObserverState> begin_frame_observer_state_;
|
|
::protozero::CopyablePtr<BeginFrameSourceState> begin_frame_source_state_;
|
|
::protozero::CopyablePtr<CompositorTimingHistory> compositor_timing_history_;
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<18> _has_field_{};
|
|
};
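// Illustrative usage sketch (editorial comment, not part of the generated
// output): submessage fields are held via ::protozero::CopyablePtr and are
// reached through mutable_*() accessors, which also mark the field as
// present. The numeric values below are arbitrary examples:
//
//   perfetto::protos::gen::ChromeCompositorSchedulerState state;
//   state.set_deadline_us(16667);
//   state.mutable_begin_frame_source_state()->set_source_id(1u);
//   state.mutable_begin_frame_source_state()
//       ->mutable_last_begin_frame_args()
//       ->set_frame_time_us(1000);
//   std::vector<uint8_t> encoded = state.SerializeAsArray();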
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_COMPOSITOR_SCHEDULER_STATE_PROTO_CPP_H_
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/source_location.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_SOURCE_LOCATION_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_SOURCE_LOCATION_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class SourceLocation;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT SourceLocation : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kIidFieldNumber = 1,
|
|
kFileNameFieldNumber = 2,
|
|
kFunctionNameFieldNumber = 3,
|
|
kLineNumberFieldNumber = 4,
|
|
};
|
|
|
|
SourceLocation();
|
|
~SourceLocation() override;
|
|
SourceLocation(SourceLocation&&) noexcept;
|
|
SourceLocation& operator=(SourceLocation&&);
|
|
SourceLocation(const SourceLocation&);
|
|
SourceLocation& operator=(const SourceLocation&);
|
|
bool operator==(const SourceLocation&) const;
|
|
bool operator!=(const SourceLocation& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_iid() const { return _has_field_[1]; }
|
|
uint64_t iid() const { return iid_; }
|
|
void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
|
|
|
|
bool has_file_name() const { return _has_field_[2]; }
|
|
const std::string& file_name() const { return file_name_; }
|
|
void set_file_name(const std::string& value) { file_name_ = value; _has_field_.set(2); }
|
|
|
|
bool has_function_name() const { return _has_field_[3]; }
|
|
const std::string& function_name() const { return function_name_; }
|
|
void set_function_name(const std::string& value) { function_name_ = value; _has_field_.set(3); }
|
|
|
|
bool has_line_number() const { return _has_field_[4]; }
|
|
uint32_t line_number() const { return line_number_; }
|
|
void set_line_number(uint32_t value) { line_number_ = value; _has_field_.set(4); }
|
|
|
|
private:
|
|
uint64_t iid_{};
|
|
std::string file_name_{};
|
|
std::string function_name_{};
|
|
uint32_t line_number_{};
|
|
|
|
// Allows preserving unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<5> _has_field_{};
|
|
};

} // namespace perfetto
} // namespace protos
} // namespace gen

#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_SOURCE_LOCATION_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/source_location.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
CompositorTimingHistory::CompositorTimingHistory() = default;
|
|
CompositorTimingHistory::~CompositorTimingHistory() = default;
|
|
CompositorTimingHistory::CompositorTimingHistory(const CompositorTimingHistory&) = default;
|
|
CompositorTimingHistory& CompositorTimingHistory::operator=(const CompositorTimingHistory&) = default;
|
|
CompositorTimingHistory::CompositorTimingHistory(CompositorTimingHistory&&) noexcept = default;
|
|
CompositorTimingHistory& CompositorTimingHistory::operator=(CompositorTimingHistory&&) = default;
|
|
|
|
bool CompositorTimingHistory::operator==(const CompositorTimingHistory& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& begin_main_frame_queue_critical_estimate_delta_us_ == other.begin_main_frame_queue_critical_estimate_delta_us_
|
|
&& begin_main_frame_queue_not_critical_estimate_delta_us_ == other.begin_main_frame_queue_not_critical_estimate_delta_us_
|
|
&& begin_main_frame_start_to_ready_to_commit_estimate_delta_us_ == other.begin_main_frame_start_to_ready_to_commit_estimate_delta_us_
|
|
&& commit_to_ready_to_activate_estimate_delta_us_ == other.commit_to_ready_to_activate_estimate_delta_us_
|
|
&& prepare_tiles_estimate_delta_us_ == other.prepare_tiles_estimate_delta_us_
|
|
&& activate_estimate_delta_us_ == other.activate_estimate_delta_us_
|
|
&& draw_estimate_delta_us_ == other.draw_estimate_delta_us_;
|
|
}
|
|
|
|
bool CompositorTimingHistory::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* begin_main_frame_queue_critical_estimate_delta_us */:
|
|
field.get(&begin_main_frame_queue_critical_estimate_delta_us_);
|
|
break;
|
|
case 2 /* begin_main_frame_queue_not_critical_estimate_delta_us */:
|
|
field.get(&begin_main_frame_queue_not_critical_estimate_delta_us_);
|
|
break;
|
|
case 3 /* begin_main_frame_start_to_ready_to_commit_estimate_delta_us */:
|
|
field.get(&begin_main_frame_start_to_ready_to_commit_estimate_delta_us_);
|
|
break;
|
|
case 4 /* commit_to_ready_to_activate_estimate_delta_us */:
|
|
field.get(&commit_to_ready_to_activate_estimate_delta_us_);
|
|
break;
|
|
case 5 /* prepare_tiles_estimate_delta_us */:
|
|
field.get(&prepare_tiles_estimate_delta_us_);
|
|
break;
|
|
case 6 /* activate_estimate_delta_us */:
|
|
field.get(&activate_estimate_delta_us_);
|
|
break;
|
|
case 7 /* draw_estimate_delta_us */:
|
|
field.get(&draw_estimate_delta_us_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string CompositorTimingHistory::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> CompositorTimingHistory::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void CompositorTimingHistory::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: begin_main_frame_queue_critical_estimate_delta_us
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, begin_main_frame_queue_critical_estimate_delta_us_);
|
|
}
|
|
|
|
// Field 2: begin_main_frame_queue_not_critical_estimate_delta_us
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, begin_main_frame_queue_not_critical_estimate_delta_us_);
|
|
}
|
|
|
|
// Field 3: begin_main_frame_start_to_ready_to_commit_estimate_delta_us
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, begin_main_frame_start_to_ready_to_commit_estimate_delta_us_);
|
|
}
|
|
|
|
// Field 4: commit_to_ready_to_activate_estimate_delta_us
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, commit_to_ready_to_activate_estimate_delta_us_);
|
|
}
|
|
|
|
// Field 5: prepare_tiles_estimate_delta_us
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, prepare_tiles_estimate_delta_us_);
|
|
}
|
|
|
|
// Field 6: activate_estimate_delta_us
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, activate_estimate_delta_us_);
|
|
}
|
|
|
|
// Field 7: draw_estimate_delta_us
|
|
if (_has_field_[7]) {
|
|
msg->AppendVarInt(7, draw_estimate_delta_us_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
BeginFrameSourceState::BeginFrameSourceState() = default;
|
|
BeginFrameSourceState::~BeginFrameSourceState() = default;
|
|
BeginFrameSourceState::BeginFrameSourceState(const BeginFrameSourceState&) = default;
|
|
BeginFrameSourceState& BeginFrameSourceState::operator=(const BeginFrameSourceState&) = default;
|
|
BeginFrameSourceState::BeginFrameSourceState(BeginFrameSourceState&&) noexcept = default;
|
|
BeginFrameSourceState& BeginFrameSourceState::operator=(BeginFrameSourceState&&) = default;
|
|
|
|
bool BeginFrameSourceState::operator==(const BeginFrameSourceState& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& source_id_ == other.source_id_
|
|
&& paused_ == other.paused_
|
|
&& num_observers_ == other.num_observers_
|
|
&& last_begin_frame_args_ == other.last_begin_frame_args_;
|
|
}
|
|
|
|
bool BeginFrameSourceState::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* source_id */:
|
|
field.get(&source_id_);
|
|
break;
|
|
case 2 /* paused */:
|
|
field.get(&paused_);
|
|
break;
|
|
case 3 /* num_observers */:
|
|
field.get(&num_observers_);
|
|
break;
|
|
case 4 /* last_begin_frame_args */:
|
|
(*last_begin_frame_args_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string BeginFrameSourceState::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> BeginFrameSourceState::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void BeginFrameSourceState::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: source_id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, source_id_);
|
|
}
|
|
|
|
// Field 2: paused
|
|
if (_has_field_[2]) {
|
|
msg->AppendTinyVarInt(2, paused_);
|
|
}
|
|
|
|
// Field 3: num_observers
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, num_observers_);
|
|
}
|
|
|
|
// Field 4: last_begin_frame_args
|
|
if (_has_field_[4]) {
|
|
(*last_begin_frame_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
BeginFrameArgs::BeginFrameArgs() = default;
|
|
BeginFrameArgs::~BeginFrameArgs() = default;
|
|
BeginFrameArgs::BeginFrameArgs(const BeginFrameArgs&) = default;
|
|
BeginFrameArgs& BeginFrameArgs::operator=(const BeginFrameArgs&) = default;
|
|
BeginFrameArgs::BeginFrameArgs(BeginFrameArgs&&) noexcept = default;
|
|
BeginFrameArgs& BeginFrameArgs::operator=(BeginFrameArgs&&) = default;
|
|
|
|
bool BeginFrameArgs::operator==(const BeginFrameArgs& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& type_ == other.type_
|
|
&& source_id_ == other.source_id_
|
|
&& sequence_number_ == other.sequence_number_
|
|
&& frame_time_us_ == other.frame_time_us_
|
|
&& deadline_us_ == other.deadline_us_
|
|
&& interval_delta_us_ == other.interval_delta_us_
|
|
&& on_critical_path_ == other.on_critical_path_
|
|
&& animate_only_ == other.animate_only_
|
|
&& source_location_iid_ == other.source_location_iid_
|
|
&& source_location_ == other.source_location_;
|
|
}
|
|
|
|
bool BeginFrameArgs::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* type */:
|
|
field.get(&type_);
|
|
break;
|
|
case 2 /* source_id */:
|
|
field.get(&source_id_);
|
|
break;
|
|
case 3 /* sequence_number */:
|
|
field.get(&sequence_number_);
|
|
break;
|
|
case 4 /* frame_time_us */:
|
|
field.get(&frame_time_us_);
|
|
break;
|
|
case 5 /* deadline_us */:
|
|
field.get(&deadline_us_);
|
|
break;
|
|
case 6 /* interval_delta_us */:
|
|
field.get(&interval_delta_us_);
|
|
break;
|
|
case 7 /* on_critical_path */:
|
|
field.get(&on_critical_path_);
|
|
break;
|
|
case 8 /* animate_only */:
|
|
field.get(&animate_only_);
|
|
break;
|
|
case 9 /* source_location_iid */:
|
|
field.get(&source_location_iid_);
|
|
break;
|
|
case 10 /* source_location */:
|
|
(*source_location_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string BeginFrameArgs::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> BeginFrameArgs::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void BeginFrameArgs::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: type
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, type_);
|
|
}
|
|
|
|
// Field 2: source_id
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, source_id_);
|
|
}
|
|
|
|
// Field 3: sequence_number
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, sequence_number_);
|
|
}
|
|
|
|
// Field 4: frame_time_us
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, frame_time_us_);
|
|
}
|
|
|
|
// Field 5: deadline_us
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, deadline_us_);
|
|
}
|
|
|
|
// Field 6: interval_delta_us
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, interval_delta_us_);
|
|
}
|
|
|
|
// Field 7: on_critical_path
|
|
if (_has_field_[7]) {
|
|
msg->AppendTinyVarInt(7, on_critical_path_);
|
|
}
|
|
|
|
// Field 8: animate_only
|
|
if (_has_field_[8]) {
|
|
msg->AppendTinyVarInt(8, animate_only_);
|
|
}
|
|
|
|
// Field 9: source_location_iid
|
|
if (_has_field_[9]) {
|
|
msg->AppendVarInt(9, source_location_iid_);
|
|
}
|
|
|
|
// Field 10: source_location
|
|
if (_has_field_[10]) {
|
|
(*source_location_).Serialize(msg->BeginNestedMessage<::protozero::Message>(10));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
BeginFrameObserverState::BeginFrameObserverState() = default;
|
|
BeginFrameObserverState::~BeginFrameObserverState() = default;
|
|
BeginFrameObserverState::BeginFrameObserverState(const BeginFrameObserverState&) = default;
|
|
BeginFrameObserverState& BeginFrameObserverState::operator=(const BeginFrameObserverState&) = default;
|
|
BeginFrameObserverState::BeginFrameObserverState(BeginFrameObserverState&&) noexcept = default;
|
|
BeginFrameObserverState& BeginFrameObserverState::operator=(BeginFrameObserverState&&) = default;
|
|
|
|
bool BeginFrameObserverState::operator==(const BeginFrameObserverState& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& dropped_begin_frame_args_ == other.dropped_begin_frame_args_
|
|
&& last_begin_frame_args_ == other.last_begin_frame_args_;
|
|
}
|
|
|
|
bool BeginFrameObserverState::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* dropped_begin_frame_args */:
|
|
field.get(&dropped_begin_frame_args_);
|
|
break;
|
|
case 2 /* last_begin_frame_args */:
|
|
(*last_begin_frame_args_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string BeginFrameObserverState::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> BeginFrameObserverState::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void BeginFrameObserverState::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: dropped_begin_frame_args
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, dropped_begin_frame_args_);
|
|
}
|
|
|
|
// Field 2: last_begin_frame_args
|
|
if (_has_field_[2]) {
|
|
(*last_begin_frame_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
BeginImplFrameArgs::BeginImplFrameArgs() = default;
|
|
BeginImplFrameArgs::~BeginImplFrameArgs() = default;
|
|
BeginImplFrameArgs::BeginImplFrameArgs(const BeginImplFrameArgs&) = default;
|
|
BeginImplFrameArgs& BeginImplFrameArgs::operator=(const BeginImplFrameArgs&) = default;
|
|
BeginImplFrameArgs::BeginImplFrameArgs(BeginImplFrameArgs&&) noexcept = default;
|
|
BeginImplFrameArgs& BeginImplFrameArgs::operator=(BeginImplFrameArgs&&) = default;
|
|
|
|
bool BeginImplFrameArgs::operator==(const BeginImplFrameArgs& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& updated_at_us_ == other.updated_at_us_
|
|
&& finished_at_us_ == other.finished_at_us_
|
|
&& state_ == other.state_
|
|
&& current_args_ == other.current_args_
|
|
&& last_args_ == other.last_args_
|
|
&& timestamps_in_us_ == other.timestamps_in_us_;
|
|
}
|
|
|
|
bool BeginImplFrameArgs::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* updated_at_us */:
|
|
field.get(&updated_at_us_);
|
|
break;
|
|
case 2 /* finished_at_us */:
|
|
field.get(&finished_at_us_);
|
|
break;
|
|
case 3 /* state */:
|
|
field.get(&state_);
|
|
break;
|
|
case 4 /* current_args */:
|
|
(*current_args_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 5 /* last_args */:
|
|
(*last_args_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 6 /* timestamps_in_us */:
|
|
(*timestamps_in_us_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string BeginImplFrameArgs::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> BeginImplFrameArgs::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void BeginImplFrameArgs::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: updated_at_us
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, updated_at_us_);
|
|
}
|
|
|
|
// Field 2: finished_at_us
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, finished_at_us_);
|
|
}
|
|
|
|
// Field 3: state
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, state_);
|
|
}
|
|
|
|
// Field 4: current_args
|
|
if (_has_field_[4]) {
|
|
(*current_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
|
|
}
|
|
|
|
// Field 5: last_args
|
|
if (_has_field_[5]) {
|
|
(*last_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
|
|
}
|
|
|
|
// Field 6: timestamps_in_us
|
|
if (_has_field_[6]) {
|
|
(*timestamps_in_us_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
BeginImplFrameArgs_TimestampsInUs::BeginImplFrameArgs_TimestampsInUs() = default;
|
|
BeginImplFrameArgs_TimestampsInUs::~BeginImplFrameArgs_TimestampsInUs() = default;
|
|
BeginImplFrameArgs_TimestampsInUs::BeginImplFrameArgs_TimestampsInUs(const BeginImplFrameArgs_TimestampsInUs&) = default;
|
|
BeginImplFrameArgs_TimestampsInUs& BeginImplFrameArgs_TimestampsInUs::operator=(const BeginImplFrameArgs_TimestampsInUs&) = default;
|
|
BeginImplFrameArgs_TimestampsInUs::BeginImplFrameArgs_TimestampsInUs(BeginImplFrameArgs_TimestampsInUs&&) noexcept = default;
|
|
BeginImplFrameArgs_TimestampsInUs& BeginImplFrameArgs_TimestampsInUs::operator=(BeginImplFrameArgs_TimestampsInUs&&) = default;
|
|
|
|
bool BeginImplFrameArgs_TimestampsInUs::operator==(const BeginImplFrameArgs_TimestampsInUs& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& interval_delta_ == other.interval_delta_
|
|
&& now_to_deadline_delta_ == other.now_to_deadline_delta_
|
|
&& frame_time_to_now_delta_ == other.frame_time_to_now_delta_
|
|
&& frame_time_to_deadline_delta_ == other.frame_time_to_deadline_delta_
|
|
&& now_ == other.now_
|
|
&& frame_time_ == other.frame_time_
|
|
&& deadline_ == other.deadline_;
|
|
}
|
|
|
|
bool BeginImplFrameArgs_TimestampsInUs::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* interval_delta */:
|
|
field.get(&interval_delta_);
|
|
break;
|
|
case 2 /* now_to_deadline_delta */:
|
|
field.get(&now_to_deadline_delta_);
|
|
break;
|
|
case 3 /* frame_time_to_now_delta */:
|
|
field.get(&frame_time_to_now_delta_);
|
|
break;
|
|
case 4 /* frame_time_to_deadline_delta */:
|
|
field.get(&frame_time_to_deadline_delta_);
|
|
break;
|
|
case 5 /* now */:
|
|
field.get(&now_);
|
|
break;
|
|
case 6 /* frame_time */:
|
|
field.get(&frame_time_);
|
|
break;
|
|
case 7 /* deadline */:
|
|
field.get(&deadline_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string BeginImplFrameArgs_TimestampsInUs::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> BeginImplFrameArgs_TimestampsInUs::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void BeginImplFrameArgs_TimestampsInUs::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: interval_delta
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, interval_delta_);
|
|
}
|
|
|
|
// Field 2: now_to_deadline_delta
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, now_to_deadline_delta_);
|
|
}
|
|
|
|
// Field 3: frame_time_to_now_delta
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, frame_time_to_now_delta_);
|
|
}
|
|
|
|
// Field 4: frame_time_to_deadline_delta
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, frame_time_to_deadline_delta_);
|
|
}
|
|
|
|
// Field 5: now
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, now_);
|
|
}
|
|
|
|
// Field 6: frame_time
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, frame_time_);
|
|
}
|
|
|
|
// Field 7: deadline
|
|
if (_has_field_[7]) {
|
|
msg->AppendVarInt(7, deadline_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ChromeCompositorStateMachine::ChromeCompositorStateMachine() = default;
|
|
ChromeCompositorStateMachine::~ChromeCompositorStateMachine() = default;
|
|
ChromeCompositorStateMachine::ChromeCompositorStateMachine(const ChromeCompositorStateMachine&) = default;
|
|
ChromeCompositorStateMachine& ChromeCompositorStateMachine::operator=(const ChromeCompositorStateMachine&) = default;
|
|
ChromeCompositorStateMachine::ChromeCompositorStateMachine(ChromeCompositorStateMachine&&) noexcept = default;
|
|
ChromeCompositorStateMachine& ChromeCompositorStateMachine::operator=(ChromeCompositorStateMachine&&) = default;
|
|
|
|
bool ChromeCompositorStateMachine::operator==(const ChromeCompositorStateMachine& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& major_state_ == other.major_state_
|
|
&& minor_state_ == other.minor_state_;
|
|
}
|
|
|
|
bool ChromeCompositorStateMachine::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* major_state */:
|
|
(*major_state_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 2 /* minor_state */:
|
|
(*minor_state_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeCompositorStateMachine::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeCompositorStateMachine::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeCompositorStateMachine::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: major_state
|
|
if (_has_field_[1]) {
|
|
(*major_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: minor_state
|
|
if (_has_field_[2]) {
|
|
(*minor_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ChromeCompositorStateMachine_MinorState::ChromeCompositorStateMachine_MinorState() = default;
|
|
ChromeCompositorStateMachine_MinorState::~ChromeCompositorStateMachine_MinorState() = default;
|
|
ChromeCompositorStateMachine_MinorState::ChromeCompositorStateMachine_MinorState(const ChromeCompositorStateMachine_MinorState&) = default;
|
|
ChromeCompositorStateMachine_MinorState& ChromeCompositorStateMachine_MinorState::operator=(const ChromeCompositorStateMachine_MinorState&) = default;
|
|
ChromeCompositorStateMachine_MinorState::ChromeCompositorStateMachine_MinorState(ChromeCompositorStateMachine_MinorState&&) noexcept = default;
|
|
ChromeCompositorStateMachine_MinorState& ChromeCompositorStateMachine_MinorState::operator=(ChromeCompositorStateMachine_MinorState&&) = default;
|
|
|
|
bool ChromeCompositorStateMachine_MinorState::operator==(const ChromeCompositorStateMachine_MinorState& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& commit_count_ == other.commit_count_
|
|
&& current_frame_number_ == other.current_frame_number_
|
|
&& last_frame_number_submit_performed_ == other.last_frame_number_submit_performed_
|
|
&& last_frame_number_draw_performed_ == other.last_frame_number_draw_performed_
|
|
&& last_frame_number_begin_main_frame_sent_ == other.last_frame_number_begin_main_frame_sent_
|
|
&& did_draw_ == other.did_draw_
|
|
&& did_send_begin_main_frame_for_current_frame_ == other.did_send_begin_main_frame_for_current_frame_
|
|
&& did_notify_begin_main_frame_not_expected_until_ == other.did_notify_begin_main_frame_not_expected_until_
|
|
&& did_notify_begin_main_frame_not_expected_soon_ == other.did_notify_begin_main_frame_not_expected_soon_
|
|
&& wants_begin_main_frame_not_expected_ == other.wants_begin_main_frame_not_expected_
|
|
&& did_commit_during_frame_ == other.did_commit_during_frame_
|
|
&& did_invalidate_layer_tree_frame_sink_ == other.did_invalidate_layer_tree_frame_sink_
|
|
&& did_perform_impl_side_invalidaion_ == other.did_perform_impl_side_invalidaion_
|
|
&& did_prepare_tiles_ == other.did_prepare_tiles_
|
|
&& consecutive_checkerboard_animations_ == other.consecutive_checkerboard_animations_
|
|
&& pending_submit_frames_ == other.pending_submit_frames_
|
|
&& submit_frames_with_current_layer_tree_frame_sink_ == other.submit_frames_with_current_layer_tree_frame_sink_
|
|
&& needs_redraw_ == other.needs_redraw_
|
|
&& needs_prepare_tiles_ == other.needs_prepare_tiles_
|
|
&& needs_begin_main_frame_ == other.needs_begin_main_frame_
|
|
&& needs_one_begin_impl_frame_ == other.needs_one_begin_impl_frame_
|
|
&& visible_ == other.visible_
|
|
&& begin_frame_source_paused_ == other.begin_frame_source_paused_
|
|
&& can_draw_ == other.can_draw_
|
|
&& resourceless_draw_ == other.resourceless_draw_
|
|
&& has_pending_tree_ == other.has_pending_tree_
|
|
&& pending_tree_is_ready_for_activation_ == other.pending_tree_is_ready_for_activation_
|
|
&& active_tree_needs_first_draw_ == other.active_tree_needs_first_draw_
|
|
&& active_tree_is_ready_to_draw_ == other.active_tree_is_ready_to_draw_
|
|
&& did_create_and_initialize_first_layer_tree_frame_sink_ == other.did_create_and_initialize_first_layer_tree_frame_sink_
|
|
&& tree_priority_ == other.tree_priority_
|
|
&& scroll_handler_state_ == other.scroll_handler_state_
|
|
&& critical_begin_main_frame_to_activate_is_fast_ == other.critical_begin_main_frame_to_activate_is_fast_
|
|
&& main_thread_missed_last_deadline_ == other.main_thread_missed_last_deadline_
|
|
&& skip_next_begin_main_frame_to_reduce_latency_ == other.skip_next_begin_main_frame_to_reduce_latency_
|
|
&& video_needs_begin_frames_ == other.video_needs_begin_frames_
|
|
&& defer_begin_main_frame_ == other.defer_begin_main_frame_
|
|
&& last_commit_had_no_updates_ == other.last_commit_had_no_updates_
|
|
&& did_draw_in_last_frame_ == other.did_draw_in_last_frame_
|
|
&& did_submit_in_last_frame_ == other.did_submit_in_last_frame_
|
|
&& needs_impl_side_invalidation_ == other.needs_impl_side_invalidation_
|
|
&& current_pending_tree_is_impl_side_ == other.current_pending_tree_is_impl_side_
|
|
&& previous_pending_tree_was_impl_side_ == other.previous_pending_tree_was_impl_side_
|
|
&& processing_animation_worklets_for_active_tree_ == other.processing_animation_worklets_for_active_tree_
|
|
&& processing_animation_worklets_for_pending_tree_ == other.processing_animation_worklets_for_pending_tree_
|
|
&& processing_paint_worklets_for_pending_tree_ == other.processing_paint_worklets_for_pending_tree_;
|
|
}
|
|
|
|
bool ChromeCompositorStateMachine_MinorState::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* commit_count */:
|
|
field.get(&commit_count_);
|
|
break;
|
|
case 2 /* current_frame_number */:
|
|
field.get(&current_frame_number_);
|
|
break;
|
|
case 3 /* last_frame_number_submit_performed */:
|
|
field.get(&last_frame_number_submit_performed_);
|
|
break;
|
|
case 4 /* last_frame_number_draw_performed */:
|
|
field.get(&last_frame_number_draw_performed_);
|
|
break;
|
|
case 5 /* last_frame_number_begin_main_frame_sent */:
|
|
field.get(&last_frame_number_begin_main_frame_sent_);
|
|
break;
|
|
case 6 /* did_draw */:
|
|
field.get(&did_draw_);
|
|
break;
|
|
case 7 /* did_send_begin_main_frame_for_current_frame */:
|
|
field.get(&did_send_begin_main_frame_for_current_frame_);
|
|
break;
|
|
case 8 /* did_notify_begin_main_frame_not_expected_until */:
|
|
field.get(&did_notify_begin_main_frame_not_expected_until_);
|
|
break;
|
|
case 9 /* did_notify_begin_main_frame_not_expected_soon */:
|
|
field.get(&did_notify_begin_main_frame_not_expected_soon_);
|
|
break;
|
|
case 10 /* wants_begin_main_frame_not_expected */:
|
|
field.get(&wants_begin_main_frame_not_expected_);
|
|
break;
|
|
case 11 /* did_commit_during_frame */:
|
|
field.get(&did_commit_during_frame_);
|
|
break;
|
|
case 12 /* did_invalidate_layer_tree_frame_sink */:
|
|
field.get(&did_invalidate_layer_tree_frame_sink_);
|
|
break;
|
|
case 13 /* did_perform_impl_side_invalidaion */:
|
|
field.get(&did_perform_impl_side_invalidaion_);
|
|
break;
|
|
case 14 /* did_prepare_tiles */:
|
|
field.get(&did_prepare_tiles_);
|
|
break;
|
|
case 15 /* consecutive_checkerboard_animations */:
|
|
field.get(&consecutive_checkerboard_animations_);
|
|
break;
|
|
case 16 /* pending_submit_frames */:
|
|
field.get(&pending_submit_frames_);
|
|
break;
|
|
case 17 /* submit_frames_with_current_layer_tree_frame_sink */:
|
|
field.get(&submit_frames_with_current_layer_tree_frame_sink_);
|
|
break;
|
|
case 18 /* needs_redraw */:
|
|
field.get(&needs_redraw_);
|
|
break;
|
|
case 19 /* needs_prepare_tiles */:
|
|
field.get(&needs_prepare_tiles_);
|
|
break;
|
|
case 20 /* needs_begin_main_frame */:
|
|
field.get(&needs_begin_main_frame_);
|
|
break;
|
|
case 21 /* needs_one_begin_impl_frame */:
|
|
field.get(&needs_one_begin_impl_frame_);
|
|
break;
|
|
case 22 /* visible */:
|
|
field.get(&visible_);
|
|
break;
|
|
case 23 /* begin_frame_source_paused */:
|
|
field.get(&begin_frame_source_paused_);
|
|
break;
|
|
case 24 /* can_draw */:
|
|
field.get(&can_draw_);
|
|
break;
|
|
case 25 /* resourceless_draw */:
|
|
field.get(&resourceless_draw_);
|
|
break;
|
|
case 26 /* has_pending_tree */:
|
|
field.get(&has_pending_tree_);
|
|
break;
|
|
case 27 /* pending_tree_is_ready_for_activation */:
|
|
field.get(&pending_tree_is_ready_for_activation_);
|
|
break;
|
|
case 28 /* active_tree_needs_first_draw */:
|
|
field.get(&active_tree_needs_first_draw_);
|
|
break;
|
|
case 29 /* active_tree_is_ready_to_draw */:
|
|
field.get(&active_tree_is_ready_to_draw_);
|
|
break;
|
|
case 30 /* did_create_and_initialize_first_layer_tree_frame_sink */:
|
|
field.get(&did_create_and_initialize_first_layer_tree_frame_sink_);
|
|
break;
|
|
case 31 /* tree_priority */:
|
|
field.get(&tree_priority_);
|
|
break;
|
|
case 32 /* scroll_handler_state */:
|
|
field.get(&scroll_handler_state_);
|
|
break;
|
|
case 33 /* critical_begin_main_frame_to_activate_is_fast */:
|
|
field.get(&critical_begin_main_frame_to_activate_is_fast_);
|
|
break;
|
|
case 34 /* main_thread_missed_last_deadline */:
|
|
field.get(&main_thread_missed_last_deadline_);
|
|
break;
|
|
case 35 /* skip_next_begin_main_frame_to_reduce_latency */:
|
|
field.get(&skip_next_begin_main_frame_to_reduce_latency_);
|
|
break;
|
|
case 36 /* video_needs_begin_frames */:
|
|
field.get(&video_needs_begin_frames_);
|
|
break;
|
|
case 37 /* defer_begin_main_frame */:
|
|
field.get(&defer_begin_main_frame_);
|
|
break;
|
|
case 38 /* last_commit_had_no_updates */:
|
|
field.get(&last_commit_had_no_updates_);
|
|
break;
|
|
case 39 /* did_draw_in_last_frame */:
|
|
field.get(&did_draw_in_last_frame_);
|
|
break;
|
|
case 40 /* did_submit_in_last_frame */:
|
|
field.get(&did_submit_in_last_frame_);
|
|
break;
|
|
case 41 /* needs_impl_side_invalidation */:
|
|
field.get(&needs_impl_side_invalidation_);
|
|
break;
|
|
case 42 /* current_pending_tree_is_impl_side */:
|
|
field.get(&current_pending_tree_is_impl_side_);
|
|
break;
|
|
case 43 /* previous_pending_tree_was_impl_side */:
|
|
field.get(&previous_pending_tree_was_impl_side_);
|
|
break;
|
|
case 44 /* processing_animation_worklets_for_active_tree */:
|
|
field.get(&processing_animation_worklets_for_active_tree_);
|
|
break;
|
|
case 45 /* processing_animation_worklets_for_pending_tree */:
|
|
field.get(&processing_animation_worklets_for_pending_tree_);
|
|
break;
|
|
case 46 /* processing_paint_worklets_for_pending_tree */:
|
|
field.get(&processing_paint_worklets_for_pending_tree_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeCompositorStateMachine_MinorState::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeCompositorStateMachine_MinorState::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeCompositorStateMachine_MinorState::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: commit_count
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, commit_count_);
|
|
}
|
|
|
|
// Field 2: current_frame_number
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, current_frame_number_);
|
|
}
|
|
|
|
// Field 3: last_frame_number_submit_performed
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, last_frame_number_submit_performed_);
|
|
}
|
|
|
|
// Field 4: last_frame_number_draw_performed
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, last_frame_number_draw_performed_);
|
|
}
|
|
|
|
// Field 5: last_frame_number_begin_main_frame_sent
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, last_frame_number_begin_main_frame_sent_);
|
|
}
|
|
|
|
// Field 6: did_draw
|
|
if (_has_field_[6]) {
|
|
msg->AppendTinyVarInt(6, did_draw_);
|
|
}
|
|
|
|
// Field 7: did_send_begin_main_frame_for_current_frame
|
|
if (_has_field_[7]) {
|
|
msg->AppendTinyVarInt(7, did_send_begin_main_frame_for_current_frame_);
|
|
}
|
|
|
|
// Field 8: did_notify_begin_main_frame_not_expected_until
|
|
if (_has_field_[8]) {
|
|
msg->AppendTinyVarInt(8, did_notify_begin_main_frame_not_expected_until_);
|
|
}
|
|
|
|
// Field 9: did_notify_begin_main_frame_not_expected_soon
|
|
if (_has_field_[9]) {
|
|
msg->AppendTinyVarInt(9, did_notify_begin_main_frame_not_expected_soon_);
|
|
}
|
|
|
|
// Field 10: wants_begin_main_frame_not_expected
|
|
if (_has_field_[10]) {
|
|
msg->AppendTinyVarInt(10, wants_begin_main_frame_not_expected_);
|
|
}
|
|
|
|
// Field 11: did_commit_during_frame
|
|
if (_has_field_[11]) {
|
|
msg->AppendTinyVarInt(11, did_commit_during_frame_);
|
|
}
|
|
|
|
// Field 12: did_invalidate_layer_tree_frame_sink
|
|
if (_has_field_[12]) {
|
|
msg->AppendTinyVarInt(12, did_invalidate_layer_tree_frame_sink_);
|
|
}
|
|
|
|
// Field 13: did_perform_impl_side_invalidaion
|
|
if (_has_field_[13]) {
|
|
msg->AppendTinyVarInt(13, did_perform_impl_side_invalidaion_);
|
|
}
|
|
|
|
// Field 14: did_prepare_tiles
|
|
if (_has_field_[14]) {
|
|
msg->AppendTinyVarInt(14, did_prepare_tiles_);
|
|
}
|
|
|
|
// Field 15: consecutive_checkerboard_animations
|
|
if (_has_field_[15]) {
|
|
msg->AppendVarInt(15, consecutive_checkerboard_animations_);
|
|
}
|
|
|
|
// Field 16: pending_submit_frames
|
|
if (_has_field_[16]) {
|
|
msg->AppendVarInt(16, pending_submit_frames_);
|
|
}
|
|
|
|
// Field 17: submit_frames_with_current_layer_tree_frame_sink
|
|
if (_has_field_[17]) {
|
|
msg->AppendVarInt(17, submit_frames_with_current_layer_tree_frame_sink_);
|
|
}
|
|
|
|
// Field 18: needs_redraw
|
|
if (_has_field_[18]) {
|
|
msg->AppendTinyVarInt(18, needs_redraw_);
|
|
}
|
|
|
|
// Field 19: needs_prepare_tiles
|
|
if (_has_field_[19]) {
|
|
msg->AppendTinyVarInt(19, needs_prepare_tiles_);
|
|
}
|
|
|
|
// Field 20: needs_begin_main_frame
|
|
if (_has_field_[20]) {
|
|
msg->AppendTinyVarInt(20, needs_begin_main_frame_);
|
|
}
|
|
|
|
// Field 21: needs_one_begin_impl_frame
|
|
if (_has_field_[21]) {
|
|
msg->AppendTinyVarInt(21, needs_one_begin_impl_frame_);
|
|
}
|
|
|
|
// Field 22: visible
|
|
if (_has_field_[22]) {
|
|
msg->AppendTinyVarInt(22, visible_);
|
|
}
|
|
|
|
// Field 23: begin_frame_source_paused
|
|
if (_has_field_[23]) {
|
|
msg->AppendTinyVarInt(23, begin_frame_source_paused_);
|
|
}
|
|
|
|
// Field 24: can_draw
|
|
if (_has_field_[24]) {
|
|
msg->AppendTinyVarInt(24, can_draw_);
|
|
}
|
|
|
|
// Field 25: resourceless_draw
|
|
if (_has_field_[25]) {
|
|
msg->AppendTinyVarInt(25, resourceless_draw_);
|
|
}
|
|
|
|
// Field 26: has_pending_tree
|
|
if (_has_field_[26]) {
|
|
msg->AppendTinyVarInt(26, has_pending_tree_);
|
|
}
|
|
|
|
// Field 27: pending_tree_is_ready_for_activation
|
|
if (_has_field_[27]) {
|
|
msg->AppendTinyVarInt(27, pending_tree_is_ready_for_activation_);
|
|
}
|
|
|
|
// Field 28: active_tree_needs_first_draw
|
|
if (_has_field_[28]) {
|
|
msg->AppendTinyVarInt(28, active_tree_needs_first_draw_);
|
|
}
|
|
|
|
// Field 29: active_tree_is_ready_to_draw
|
|
if (_has_field_[29]) {
|
|
msg->AppendTinyVarInt(29, active_tree_is_ready_to_draw_);
|
|
}
|
|
|
|
// Field 30: did_create_and_initialize_first_layer_tree_frame_sink
|
|
if (_has_field_[30]) {
|
|
msg->AppendTinyVarInt(30, did_create_and_initialize_first_layer_tree_frame_sink_);
|
|
}
|
|
|
|
// Field 31: tree_priority
|
|
if (_has_field_[31]) {
|
|
msg->AppendVarInt(31, tree_priority_);
|
|
}
|
|
|
|
// Field 32: scroll_handler_state
|
|
if (_has_field_[32]) {
|
|
msg->AppendVarInt(32, scroll_handler_state_);
|
|
}
|
|
|
|
// Field 33: critical_begin_main_frame_to_activate_is_fast
|
|
if (_has_field_[33]) {
|
|
msg->AppendTinyVarInt(33, critical_begin_main_frame_to_activate_is_fast_);
|
|
}
|
|
|
|
// Field 34: main_thread_missed_last_deadline
|
|
if (_has_field_[34]) {
|
|
msg->AppendTinyVarInt(34, main_thread_missed_last_deadline_);
|
|
}
|
|
|
|
// Field 35: skip_next_begin_main_frame_to_reduce_latency
|
|
if (_has_field_[35]) {
|
|
msg->AppendTinyVarInt(35, skip_next_begin_main_frame_to_reduce_latency_);
|
|
}
|
|
|
|
// Field 36: video_needs_begin_frames
|
|
if (_has_field_[36]) {
|
|
msg->AppendTinyVarInt(36, video_needs_begin_frames_);
|
|
}
|
|
|
|
// Field 37: defer_begin_main_frame
|
|
if (_has_field_[37]) {
|
|
msg->AppendTinyVarInt(37, defer_begin_main_frame_);
|
|
}
|
|
|
|
// Field 38: last_commit_had_no_updates
|
|
if (_has_field_[38]) {
|
|
msg->AppendTinyVarInt(38, last_commit_had_no_updates_);
|
|
}
|
|
|
|
// Field 39: did_draw_in_last_frame
|
|
if (_has_field_[39]) {
|
|
msg->AppendTinyVarInt(39, did_draw_in_last_frame_);
|
|
}
|
|
|
|
// Field 40: did_submit_in_last_frame
|
|
if (_has_field_[40]) {
|
|
msg->AppendTinyVarInt(40, did_submit_in_last_frame_);
|
|
}
|
|
|
|
// Field 41: needs_impl_side_invalidation
|
|
if (_has_field_[41]) {
|
|
msg->AppendTinyVarInt(41, needs_impl_side_invalidation_);
|
|
}
|
|
|
|
// Field 42: current_pending_tree_is_impl_side
|
|
if (_has_field_[42]) {
|
|
msg->AppendTinyVarInt(42, current_pending_tree_is_impl_side_);
|
|
}
|
|
|
|
// Field 43: previous_pending_tree_was_impl_side
|
|
if (_has_field_[43]) {
|
|
msg->AppendTinyVarInt(43, previous_pending_tree_was_impl_side_);
|
|
}
|
|
|
|
// Field 44: processing_animation_worklets_for_active_tree
|
|
if (_has_field_[44]) {
|
|
msg->AppendTinyVarInt(44, processing_animation_worklets_for_active_tree_);
|
|
}
|
|
|
|
// Field 45: processing_animation_worklets_for_pending_tree
|
|
if (_has_field_[45]) {
|
|
msg->AppendTinyVarInt(45, processing_animation_worklets_for_pending_tree_);
|
|
}
|
|
|
|
// Field 46: processing_paint_worklets_for_pending_tree
|
|
if (_has_field_[46]) {
|
|
msg->AppendTinyVarInt(46, processing_paint_worklets_for_pending_tree_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ChromeCompositorStateMachine_MajorState::ChromeCompositorStateMachine_MajorState() = default;
|
|
ChromeCompositorStateMachine_MajorState::~ChromeCompositorStateMachine_MajorState() = default;
|
|
ChromeCompositorStateMachine_MajorState::ChromeCompositorStateMachine_MajorState(const ChromeCompositorStateMachine_MajorState&) = default;
|
|
ChromeCompositorStateMachine_MajorState& ChromeCompositorStateMachine_MajorState::operator=(const ChromeCompositorStateMachine_MajorState&) = default;
|
|
ChromeCompositorStateMachine_MajorState::ChromeCompositorStateMachine_MajorState(ChromeCompositorStateMachine_MajorState&&) noexcept = default;
|
|
ChromeCompositorStateMachine_MajorState& ChromeCompositorStateMachine_MajorState::operator=(ChromeCompositorStateMachine_MajorState&&) = default;
|
|
|
|
bool ChromeCompositorStateMachine_MajorState::operator==(const ChromeCompositorStateMachine_MajorState& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& next_action_ == other.next_action_
|
|
&& begin_impl_frame_state_ == other.begin_impl_frame_state_
|
|
&& begin_main_frame_state_ == other.begin_main_frame_state_
|
|
&& layer_tree_frame_sink_state_ == other.layer_tree_frame_sink_state_
|
|
&& forced_redraw_state_ == other.forced_redraw_state_;
|
|
}
|
|
|
|
bool ChromeCompositorStateMachine_MajorState::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* next_action */:
|
|
field.get(&next_action_);
|
|
break;
|
|
case 2 /* begin_impl_frame_state */:
|
|
field.get(&begin_impl_frame_state_);
|
|
break;
|
|
case 3 /* begin_main_frame_state */:
|
|
field.get(&begin_main_frame_state_);
|
|
break;
|
|
case 4 /* layer_tree_frame_sink_state */:
|
|
field.get(&layer_tree_frame_sink_state_);
|
|
break;
|
|
case 5 /* forced_redraw_state */:
|
|
field.get(&forced_redraw_state_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeCompositorStateMachine_MajorState::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeCompositorStateMachine_MajorState::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeCompositorStateMachine_MajorState::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: next_action
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, next_action_);
|
|
}
|
|
|
|
// Field 2: begin_impl_frame_state
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, begin_impl_frame_state_);
|
|
}
|
|
|
|
// Field 3: begin_main_frame_state
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, begin_main_frame_state_);
|
|
}
|
|
|
|
// Field 4: layer_tree_frame_sink_state
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, layer_tree_frame_sink_state_);
|
|
}
|
|
|
|
// Field 5: forced_redraw_state
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, forced_redraw_state_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ChromeCompositorSchedulerState::ChromeCompositorSchedulerState() = default;
|
|
ChromeCompositorSchedulerState::~ChromeCompositorSchedulerState() = default;
|
|
ChromeCompositorSchedulerState::ChromeCompositorSchedulerState(const ChromeCompositorSchedulerState&) = default;
|
|
ChromeCompositorSchedulerState& ChromeCompositorSchedulerState::operator=(const ChromeCompositorSchedulerState&) = default;
|
|
ChromeCompositorSchedulerState::ChromeCompositorSchedulerState(ChromeCompositorSchedulerState&&) noexcept = default;
|
|
ChromeCompositorSchedulerState& ChromeCompositorSchedulerState::operator=(ChromeCompositorSchedulerState&&) = default;
|
|
|
|
bool ChromeCompositorSchedulerState::operator==(const ChromeCompositorSchedulerState& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& state_machine_ == other.state_machine_
|
|
&& observing_begin_frame_source_ == other.observing_begin_frame_source_
|
|
&& begin_impl_frame_deadline_task_ == other.begin_impl_frame_deadline_task_
|
|
&& pending_begin_frame_task_ == other.pending_begin_frame_task_
|
|
&& skipped_last_frame_missed_exceeded_deadline_ == other.skipped_last_frame_missed_exceeded_deadline_
|
|
&& skipped_last_frame_to_reduce_latency_ == other.skipped_last_frame_to_reduce_latency_
|
|
&& inside_action_ == other.inside_action_
|
|
&& deadline_mode_ == other.deadline_mode_
|
|
&& deadline_us_ == other.deadline_us_
|
|
&& deadline_scheduled_at_us_ == other.deadline_scheduled_at_us_
|
|
&& now_us_ == other.now_us_
|
|
&& now_to_deadline_delta_us_ == other.now_to_deadline_delta_us_
|
|
&& now_to_deadline_scheduled_at_delta_us_ == other.now_to_deadline_scheduled_at_delta_us_
|
|
&& begin_impl_frame_args_ == other.begin_impl_frame_args_
|
|
&& begin_frame_observer_state_ == other.begin_frame_observer_state_
|
|
&& begin_frame_source_state_ == other.begin_frame_source_state_
|
|
&& compositor_timing_history_ == other.compositor_timing_history_;
|
|
}
|
|
|
|
bool ChromeCompositorSchedulerState::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* state_machine */:
|
|
(*state_machine_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 2 /* observing_begin_frame_source */:
|
|
field.get(&observing_begin_frame_source_);
|
|
break;
|
|
case 3 /* begin_impl_frame_deadline_task */:
|
|
field.get(&begin_impl_frame_deadline_task_);
|
|
break;
|
|
case 4 /* pending_begin_frame_task */:
|
|
field.get(&pending_begin_frame_task_);
|
|
break;
|
|
case 5 /* skipped_last_frame_missed_exceeded_deadline */:
|
|
field.get(&skipped_last_frame_missed_exceeded_deadline_);
|
|
break;
|
|
case 6 /* skipped_last_frame_to_reduce_latency */:
|
|
field.get(&skipped_last_frame_to_reduce_latency_);
|
|
break;
|
|
case 7 /* inside_action */:
|
|
field.get(&inside_action_);
|
|
break;
|
|
case 8 /* deadline_mode */:
|
|
field.get(&deadline_mode_);
|
|
break;
|
|
case 9 /* deadline_us */:
|
|
field.get(&deadline_us_);
|
|
break;
|
|
case 10 /* deadline_scheduled_at_us */:
|
|
field.get(&deadline_scheduled_at_us_);
|
|
break;
|
|
case 11 /* now_us */:
|
|
field.get(&now_us_);
|
|
break;
|
|
case 12 /* now_to_deadline_delta_us */:
|
|
field.get(&now_to_deadline_delta_us_);
|
|
break;
|
|
case 13 /* now_to_deadline_scheduled_at_delta_us */:
|
|
field.get(&now_to_deadline_scheduled_at_delta_us_);
|
|
break;
|
|
case 14 /* begin_impl_frame_args */:
|
|
(*begin_impl_frame_args_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 15 /* begin_frame_observer_state */:
|
|
(*begin_frame_observer_state_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 16 /* begin_frame_source_state */:
|
|
(*begin_frame_source_state_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 17 /* compositor_timing_history */:
|
|
(*compositor_timing_history_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeCompositorSchedulerState::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeCompositorSchedulerState::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeCompositorSchedulerState::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: state_machine
|
|
if (_has_field_[1]) {
|
|
(*state_machine_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: observing_begin_frame_source
|
|
if (_has_field_[2]) {
|
|
msg->AppendTinyVarInt(2, observing_begin_frame_source_);
|
|
}
|
|
|
|
// Field 3: begin_impl_frame_deadline_task
|
|
if (_has_field_[3]) {
|
|
msg->AppendTinyVarInt(3, begin_impl_frame_deadline_task_);
|
|
}
|
|
|
|
// Field 4: pending_begin_frame_task
|
|
if (_has_field_[4]) {
|
|
msg->AppendTinyVarInt(4, pending_begin_frame_task_);
|
|
}
|
|
|
|
// Field 5: skipped_last_frame_missed_exceeded_deadline
|
|
if (_has_field_[5]) {
|
|
msg->AppendTinyVarInt(5, skipped_last_frame_missed_exceeded_deadline_);
|
|
}
|
|
|
|
// Field 6: skipped_last_frame_to_reduce_latency
|
|
if (_has_field_[6]) {
|
|
msg->AppendTinyVarInt(6, skipped_last_frame_to_reduce_latency_);
|
|
}
|
|
|
|
// Field 7: inside_action
|
|
if (_has_field_[7]) {
|
|
msg->AppendVarInt(7, inside_action_);
|
|
}
|
|
|
|
// Field 8: deadline_mode
|
|
if (_has_field_[8]) {
|
|
msg->AppendVarInt(8, deadline_mode_);
|
|
}
|
|
|
|
// Field 9: deadline_us
|
|
if (_has_field_[9]) {
|
|
msg->AppendVarInt(9, deadline_us_);
|
|
}
|
|
|
|
// Field 10: deadline_scheduled_at_us
|
|
if (_has_field_[10]) {
|
|
msg->AppendVarInt(10, deadline_scheduled_at_us_);
|
|
}
|
|
|
|
// Field 11: now_us
|
|
if (_has_field_[11]) {
|
|
msg->AppendVarInt(11, now_us_);
|
|
}
|
|
|
|
// Field 12: now_to_deadline_delta_us
|
|
if (_has_field_[12]) {
|
|
msg->AppendVarInt(12, now_to_deadline_delta_us_);
|
|
}
|
|
|
|
// Field 13: now_to_deadline_scheduled_at_delta_us
|
|
if (_has_field_[13]) {
|
|
msg->AppendVarInt(13, now_to_deadline_scheduled_at_delta_us_);
|
|
}
|
|
|
|
// Field 14: begin_impl_frame_args
|
|
if (_has_field_[14]) {
|
|
(*begin_impl_frame_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(14));
|
|
}
|
|
|
|
// Field 15: begin_frame_observer_state
|
|
if (_has_field_[15]) {
|
|
(*begin_frame_observer_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(15));
|
|
}
|
|
|
|
// Field 16: begin_frame_source_state
|
|
if (_has_field_[16]) {
|
|
(*begin_frame_source_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(16));
|
|
}
|
|
|
|
// Field 17: compositor_timing_history
|
|
if (_has_field_[17]) {
|
|
(*compositor_timing_history_).Serialize(msg->BeginNestedMessage<::protozero::Message>(17));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}

} // namespace perfetto
} // namespace protos
} // namespace gen
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_frame_reporter.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_frame_reporter.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_FRAME_REPORTER_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_FRAME_REPORTER_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ChromeFrameReporter;
|
|
enum ChromeFrameReporter_State : int;
|
|
enum ChromeFrameReporter_FrameDropReason : int;
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum ChromeFrameReporter_State : int {
|
|
ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED = 0,
|
|
ChromeFrameReporter_State_STATE_PRESENTED_ALL = 1,
|
|
ChromeFrameReporter_State_STATE_PRESENTED_PARTIAL = 2,
|
|
ChromeFrameReporter_State_STATE_DROPPED = 3,
|
|
};
|
|
enum ChromeFrameReporter_FrameDropReason : int {
|
|
ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED = 0,
|
|
ChromeFrameReporter_FrameDropReason_REASON_DISPLAY_COMPOSITOR = 1,
|
|
ChromeFrameReporter_FrameDropReason_REASON_MAIN_THREAD = 2,
|
|
ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR = 3,
|
|
};
|
|
|
|
class PERFETTO_EXPORT ChromeFrameReporter : public ::protozero::CppMessageObj {
|
|
public:
|
|
using State = ChromeFrameReporter_State;
|
|
static constexpr auto STATE_NO_UPDATE_DESIRED = ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED;
|
|
static constexpr auto STATE_PRESENTED_ALL = ChromeFrameReporter_State_STATE_PRESENTED_ALL;
|
|
static constexpr auto STATE_PRESENTED_PARTIAL = ChromeFrameReporter_State_STATE_PRESENTED_PARTIAL;
|
|
static constexpr auto STATE_DROPPED = ChromeFrameReporter_State_STATE_DROPPED;
|
|
static constexpr auto State_MIN = ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED;
|
|
static constexpr auto State_MAX = ChromeFrameReporter_State_STATE_DROPPED;
|
|
using FrameDropReason = ChromeFrameReporter_FrameDropReason;
|
|
static constexpr auto REASON_UNSPECIFIED = ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED;
|
|
static constexpr auto REASON_DISPLAY_COMPOSITOR = ChromeFrameReporter_FrameDropReason_REASON_DISPLAY_COMPOSITOR;
|
|
static constexpr auto REASON_MAIN_THREAD = ChromeFrameReporter_FrameDropReason_REASON_MAIN_THREAD;
|
|
static constexpr auto REASON_CLIENT_COMPOSITOR = ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR;
|
|
static constexpr auto FrameDropReason_MIN = ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED;
|
|
static constexpr auto FrameDropReason_MAX = ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR;
|
|
enum FieldNumbers {
|
|
kStateFieldNumber = 1,
|
|
kReasonFieldNumber = 2,
|
|
kFrameSourceFieldNumber = 3,
|
|
kFrameSequenceFieldNumber = 4,
|
|
};
|
|
|
|
ChromeFrameReporter();
|
|
~ChromeFrameReporter() override;
|
|
ChromeFrameReporter(ChromeFrameReporter&&) noexcept;
|
|
ChromeFrameReporter& operator=(ChromeFrameReporter&&);
|
|
ChromeFrameReporter(const ChromeFrameReporter&);
|
|
ChromeFrameReporter& operator=(const ChromeFrameReporter&);
|
|
bool operator==(const ChromeFrameReporter&) const;
|
|
bool operator!=(const ChromeFrameReporter& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_state() const { return _has_field_[1]; }
|
|
ChromeFrameReporter_State state() const { return state_; }
|
|
void set_state(ChromeFrameReporter_State value) { state_ = value; _has_field_.set(1); }
|
|
|
|
bool has_reason() const { return _has_field_[2]; }
|
|
ChromeFrameReporter_FrameDropReason reason() const { return reason_; }
|
|
void set_reason(ChromeFrameReporter_FrameDropReason value) { reason_ = value; _has_field_.set(2); }
|
|
|
|
bool has_frame_source() const { return _has_field_[3]; }
|
|
uint64_t frame_source() const { return frame_source_; }
|
|
void set_frame_source(uint64_t value) { frame_source_ = value; _has_field_.set(3); }
|
|
|
|
bool has_frame_sequence() const { return _has_field_[4]; }
|
|
uint64_t frame_sequence() const { return frame_sequence_; }
|
|
void set_frame_sequence(uint64_t value) { frame_sequence_ = value; _has_field_.set(4); }
|
|
|
|
private:
|
|
ChromeFrameReporter_State state_{};
|
|
ChromeFrameReporter_FrameDropReason reason_{};
|
|
uint64_t frame_source_{};
|
|
uint64_t frame_sequence_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<5> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_FRAME_REPORTER_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_frame_reporter.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeFrameReporter::ChromeFrameReporter() = default;
|
|
ChromeFrameReporter::~ChromeFrameReporter() = default;
|
|
ChromeFrameReporter::ChromeFrameReporter(const ChromeFrameReporter&) = default;
|
|
ChromeFrameReporter& ChromeFrameReporter::operator=(const ChromeFrameReporter&) = default;
|
|
ChromeFrameReporter::ChromeFrameReporter(ChromeFrameReporter&&) noexcept = default;
|
|
ChromeFrameReporter& ChromeFrameReporter::operator=(ChromeFrameReporter&&) = default;
|
|
|
|
bool ChromeFrameReporter::operator==(const ChromeFrameReporter& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& state_ == other.state_
|
|
&& reason_ == other.reason_
|
|
&& frame_source_ == other.frame_source_
|
|
&& frame_sequence_ == other.frame_sequence_;
|
|
}
|
|
|
|
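// Decoding walks the wire-format fields with ProtoDecoder: known field ids
// are stored into the matching member and marked present in _has_field_,
// while unrecognized fields are re-serialized into unknown_fields_ so they
// are preserved on a subsequent Serialize().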
bool ChromeFrameReporter::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* state */:
|
|
field.get(&state_);
|
|
break;
|
|
case 2 /* reason */:
|
|
field.get(&reason_);
|
|
break;
|
|
case 3 /* frame_source */:
|
|
field.get(&frame_source_);
|
|
break;
|
|
case 4 /* frame_sequence */:
|
|
field.get(&frame_sequence_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeFrameReporter::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeFrameReporter::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeFrameReporter::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: state
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, state_);
|
|
}
|
|
|
|
// Field 2: reason
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, reason_);
|
|
}
|
|
|
|
// Field 3: frame_source
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, frame_source_);
|
|
}
|
|
|
|
// Field 4: frame_sequence
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, frame_sequence_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_histogram_sample.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_histogram_sample.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_HISTOGRAM_SAMPLE_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_HISTOGRAM_SAMPLE_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ChromeHistogramSample;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT ChromeHistogramSample : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kNameHashFieldNumber = 1,
|
|
kNameFieldNumber = 2,
|
|
kSampleFieldNumber = 3,
|
|
};
|
|
|
|
ChromeHistogramSample();
|
|
~ChromeHistogramSample() override;
|
|
ChromeHistogramSample(ChromeHistogramSample&&) noexcept;
|
|
ChromeHistogramSample& operator=(ChromeHistogramSample&&);
|
|
ChromeHistogramSample(const ChromeHistogramSample&);
|
|
ChromeHistogramSample& operator=(const ChromeHistogramSample&);
|
|
bool operator==(const ChromeHistogramSample&) const;
|
|
bool operator!=(const ChromeHistogramSample& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_name_hash() const { return _has_field_[1]; }
|
|
uint64_t name_hash() const { return name_hash_; }
|
|
void set_name_hash(uint64_t value) { name_hash_ = value; _has_field_.set(1); }
|
|
|
|
bool has_name() const { return _has_field_[2]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
|
|
|
|
bool has_sample() const { return _has_field_[3]; }
|
|
int64_t sample() const { return sample_; }
|
|
void set_sample(int64_t value) { sample_ = value; _has_field_.set(3); }
|
|
|
|
private:
|
|
uint64_t name_hash_{};
|
|
std::string name_{};
|
|
int64_t sample_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<4> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_HISTOGRAM_SAMPLE_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_histogram_sample.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeHistogramSample::ChromeHistogramSample() = default;
|
|
ChromeHistogramSample::~ChromeHistogramSample() = default;
|
|
ChromeHistogramSample::ChromeHistogramSample(const ChromeHistogramSample&) = default;
|
|
ChromeHistogramSample& ChromeHistogramSample::operator=(const ChromeHistogramSample&) = default;
|
|
ChromeHistogramSample::ChromeHistogramSample(ChromeHistogramSample&&) noexcept = default;
|
|
ChromeHistogramSample& ChromeHistogramSample::operator=(ChromeHistogramSample&&) = default;
|
|
|
|
bool ChromeHistogramSample::operator==(const ChromeHistogramSample& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_hash_ == other.name_hash_
|
|
&& name_ == other.name_
|
|
&& sample_ == other.sample_;
|
|
}
|
|
|
|
bool ChromeHistogramSample::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name_hash */:
|
|
field.get(&name_hash_);
|
|
break;
|
|
case 2 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 3 /* sample */:
|
|
field.get(&sample_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeHistogramSample::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeHistogramSample::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeHistogramSample::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name_hash
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, name_hash_);
|
|
}
|
|
|
|
// Field 2: name
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, name_);
|
|
}
|
|
|
|
// Field 3: sample
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, sample_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_keyed_service.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_keyed_service.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_KEYED_SERVICE_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_KEYED_SERVICE_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ChromeKeyedService;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT ChromeKeyedService : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kNameFieldNumber = 1,
|
|
};
|
|
|
|
ChromeKeyedService();
|
|
~ChromeKeyedService() override;
|
|
ChromeKeyedService(ChromeKeyedService&&) noexcept;
|
|
ChromeKeyedService& operator=(ChromeKeyedService&&);
|
|
ChromeKeyedService(const ChromeKeyedService&);
|
|
ChromeKeyedService& operator=(const ChromeKeyedService&);
|
|
bool operator==(const ChromeKeyedService&) const;
|
|
bool operator!=(const ChromeKeyedService& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_name() const { return _has_field_[1]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
std::string name_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_KEYED_SERVICE_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_keyed_service.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeKeyedService::ChromeKeyedService() = default;
|
|
ChromeKeyedService::~ChromeKeyedService() = default;
|
|
ChromeKeyedService::ChromeKeyedService(const ChromeKeyedService&) = default;
|
|
ChromeKeyedService& ChromeKeyedService::operator=(const ChromeKeyedService&) = default;
|
|
ChromeKeyedService::ChromeKeyedService(ChromeKeyedService&&) noexcept = default;
|
|
ChromeKeyedService& ChromeKeyedService::operator=(ChromeKeyedService&&) = default;
|
|
|
|
bool ChromeKeyedService::operator==(const ChromeKeyedService& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_ == other.name_;
|
|
}
|
|
|
|
bool ChromeKeyedService::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeKeyedService::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeKeyedService::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeKeyedService::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, name_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_latency_info.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_latency_info.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LATENCY_INFO_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LATENCY_INFO_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ChromeLatencyInfo;
|
|
class ChromeLatencyInfo_ComponentInfo;
|
|
enum ChromeLatencyInfo_Step : int;
|
|
enum ChromeLatencyInfo_LatencyComponentType : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum ChromeLatencyInfo_Step : int {
|
|
ChromeLatencyInfo_Step_STEP_UNSPECIFIED = 0,
|
|
ChromeLatencyInfo_Step_STEP_SEND_INPUT_EVENT_UI = 3,
|
|
ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_IMPL = 5,
|
|
ChromeLatencyInfo_Step_STEP_DID_HANDLE_INPUT_AND_OVERSCROLL = 8,
|
|
ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN = 4,
|
|
ChromeLatencyInfo_Step_STEP_MAIN_THREAD_SCROLL_UPDATE = 2,
|
|
ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT = 1,
|
|
ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL = 9,
|
|
ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_IMPL = 10,
|
|
ChromeLatencyInfo_Step_STEP_SWAP_BUFFERS = 6,
|
|
ChromeLatencyInfo_Step_STEP_DRAW_AND_SWAP = 7,
|
|
};
|
|
enum ChromeLatencyInfo_LatencyComponentType : int {
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED = 0,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH = 1,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL = 2,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL = 3,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL = 4,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_UI = 5,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN = 6,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN = 7,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL = 8,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT = 9,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH = 10,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP = 11,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME = 12,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER = 13,
|
|
ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP = 14,
|
|
};
|
|
|
|
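// ChromeLatencyInfo differs from the scalar-only messages above in that the
// repeated component_info field is stored as a std::vector of nested
// ComponentInfo messages, exposed through add_/mutable_/clear_ helpers.
// Illustrative sketch of populating the repeated field (not part of the
// generated file itself):
//
//   perfetto::protos::gen::ChromeLatencyInfo info;
//   info.set_trace_id(1234);
//   auto* component = info.add_component_info();
//   component->set_component_type(
//       perfetto::protos::gen::ChromeLatencyInfo::
//           COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH);
//   component->set_time_us(5000);
//   std::vector<uint8_t> encoded = info.SerializeAsArray();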
class PERFETTO_EXPORT ChromeLatencyInfo : public ::protozero::CppMessageObj {
|
|
public:
|
|
using ComponentInfo = ChromeLatencyInfo_ComponentInfo;
|
|
using Step = ChromeLatencyInfo_Step;
|
|
static constexpr auto STEP_UNSPECIFIED = ChromeLatencyInfo_Step_STEP_UNSPECIFIED;
|
|
static constexpr auto STEP_SEND_INPUT_EVENT_UI = ChromeLatencyInfo_Step_STEP_SEND_INPUT_EVENT_UI;
|
|
static constexpr auto STEP_HANDLE_INPUT_EVENT_IMPL = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_IMPL;
|
|
static constexpr auto STEP_DID_HANDLE_INPUT_AND_OVERSCROLL = ChromeLatencyInfo_Step_STEP_DID_HANDLE_INPUT_AND_OVERSCROLL;
|
|
static constexpr auto STEP_HANDLE_INPUT_EVENT_MAIN = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN;
|
|
static constexpr auto STEP_MAIN_THREAD_SCROLL_UPDATE = ChromeLatencyInfo_Step_STEP_MAIN_THREAD_SCROLL_UPDATE;
|
|
static constexpr auto STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT;
|
|
static constexpr auto STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL = ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL;
|
|
static constexpr auto STEP_HANDLED_INPUT_EVENT_IMPL = ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_IMPL;
|
|
static constexpr auto STEP_SWAP_BUFFERS = ChromeLatencyInfo_Step_STEP_SWAP_BUFFERS;
|
|
static constexpr auto STEP_DRAW_AND_SWAP = ChromeLatencyInfo_Step_STEP_DRAW_AND_SWAP;
|
|
static constexpr auto Step_MIN = ChromeLatencyInfo_Step_STEP_UNSPECIFIED;
|
|
static constexpr auto Step_MAX = ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_IMPL;
|
|
using LatencyComponentType = ChromeLatencyInfo_LatencyComponentType;
|
|
static constexpr auto COMPONENT_UNSPECIFIED = ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_UI = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_UI;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP;
|
|
static constexpr auto COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME = ChromeLatencyInfo_LatencyComponentType_COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER;
|
|
static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP;
|
|
static constexpr auto LatencyComponentType_MIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED;
|
|
static constexpr auto LatencyComponentType_MAX = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP;
|
|
enum FieldNumbers {
|
|
kTraceIdFieldNumber = 1,
|
|
kStepFieldNumber = 2,
|
|
kFrameTreeNodeIdFieldNumber = 3,
|
|
kComponentInfoFieldNumber = 4,
|
|
kIsCoalescedFieldNumber = 5,
|
|
kGestureScrollIdFieldNumber = 6,
|
|
};
|
|
|
|
ChromeLatencyInfo();
|
|
~ChromeLatencyInfo() override;
|
|
ChromeLatencyInfo(ChromeLatencyInfo&&) noexcept;
|
|
ChromeLatencyInfo& operator=(ChromeLatencyInfo&&);
|
|
ChromeLatencyInfo(const ChromeLatencyInfo&);
|
|
ChromeLatencyInfo& operator=(const ChromeLatencyInfo&);
|
|
bool operator==(const ChromeLatencyInfo&) const;
|
|
bool operator!=(const ChromeLatencyInfo& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_trace_id() const { return _has_field_[1]; }
|
|
int64_t trace_id() const { return trace_id_; }
|
|
void set_trace_id(int64_t value) { trace_id_ = value; _has_field_.set(1); }
|
|
|
|
bool has_step() const { return _has_field_[2]; }
|
|
ChromeLatencyInfo_Step step() const { return step_; }
|
|
void set_step(ChromeLatencyInfo_Step value) { step_ = value; _has_field_.set(2); }
|
|
|
|
bool has_frame_tree_node_id() const { return _has_field_[3]; }
|
|
int32_t frame_tree_node_id() const { return frame_tree_node_id_; }
|
|
void set_frame_tree_node_id(int32_t value) { frame_tree_node_id_ = value; _has_field_.set(3); }
|
|
|
|
int component_info_size() const { return static_cast<int>(component_info_.size()); }
|
|
const std::vector<ChromeLatencyInfo_ComponentInfo>& component_info() const { return component_info_; }
|
|
std::vector<ChromeLatencyInfo_ComponentInfo>* mutable_component_info() { return &component_info_; }
|
|
void clear_component_info() { component_info_.clear(); }
|
|
ChromeLatencyInfo_ComponentInfo* add_component_info() { component_info_.emplace_back(); return &component_info_.back(); }
|
|
|
|
bool has_is_coalesced() const { return _has_field_[5]; }
|
|
bool is_coalesced() const { return is_coalesced_; }
|
|
void set_is_coalesced(bool value) { is_coalesced_ = value; _has_field_.set(5); }
|
|
|
|
bool has_gesture_scroll_id() const { return _has_field_[6]; }
|
|
int64_t gesture_scroll_id() const { return gesture_scroll_id_; }
|
|
void set_gesture_scroll_id(int64_t value) { gesture_scroll_id_ = value; _has_field_.set(6); }
|
|
|
|
private:
|
|
int64_t trace_id_{};
|
|
ChromeLatencyInfo_Step step_{};
|
|
int32_t frame_tree_node_id_{};
|
|
std::vector<ChromeLatencyInfo_ComponentInfo> component_info_;
|
|
bool is_coalesced_{};
|
|
int64_t gesture_scroll_id_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<7> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ChromeLatencyInfo_ComponentInfo : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kComponentTypeFieldNumber = 1,
|
|
kTimeUsFieldNumber = 2,
|
|
};
|
|
|
|
ChromeLatencyInfo_ComponentInfo();
|
|
~ChromeLatencyInfo_ComponentInfo() override;
|
|
ChromeLatencyInfo_ComponentInfo(ChromeLatencyInfo_ComponentInfo&&) noexcept;
|
|
ChromeLatencyInfo_ComponentInfo& operator=(ChromeLatencyInfo_ComponentInfo&&);
|
|
ChromeLatencyInfo_ComponentInfo(const ChromeLatencyInfo_ComponentInfo&);
|
|
ChromeLatencyInfo_ComponentInfo& operator=(const ChromeLatencyInfo_ComponentInfo&);
|
|
bool operator==(const ChromeLatencyInfo_ComponentInfo&) const;
|
|
bool operator!=(const ChromeLatencyInfo_ComponentInfo& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_component_type() const { return _has_field_[1]; }
|
|
ChromeLatencyInfo_LatencyComponentType component_type() const { return component_type_; }
|
|
void set_component_type(ChromeLatencyInfo_LatencyComponentType value) { component_type_ = value; _has_field_.set(1); }
|
|
|
|
bool has_time_us() const { return _has_field_[2]; }
|
|
uint64_t time_us() const { return time_us_; }
|
|
void set_time_us(uint64_t value) { time_us_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
ChromeLatencyInfo_LatencyComponentType component_type_{};
|
|
uint64_t time_us_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LATENCY_INFO_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_latency_info.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeLatencyInfo::ChromeLatencyInfo() = default;
|
|
ChromeLatencyInfo::~ChromeLatencyInfo() = default;
|
|
ChromeLatencyInfo::ChromeLatencyInfo(const ChromeLatencyInfo&) = default;
|
|
ChromeLatencyInfo& ChromeLatencyInfo::operator=(const ChromeLatencyInfo&) = default;
|
|
ChromeLatencyInfo::ChromeLatencyInfo(ChromeLatencyInfo&&) noexcept = default;
|
|
ChromeLatencyInfo& ChromeLatencyInfo::operator=(ChromeLatencyInfo&&) = default;
|
|
|
|
bool ChromeLatencyInfo::operator==(const ChromeLatencyInfo& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& trace_id_ == other.trace_id_
|
|
&& step_ == other.step_
|
|
&& frame_tree_node_id_ == other.frame_tree_node_id_
|
|
&& component_info_ == other.component_info_
|
|
&& is_coalesced_ == other.is_coalesced_
|
|
&& gesture_scroll_id_ == other.gesture_scroll_id_;
|
|
}
|
|
|
|
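// component_info (field 4) is a length-delimited nested message: each
// occurrence is appended to the vector and decoded from its payload via
// ParseFromString(); the remaining fields are scalars handled by field.get().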
bool ChromeLatencyInfo::ParseFromArray(const void* raw, size_t size) {
|
|
component_info_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_id */:
|
|
field.get(&trace_id_);
|
|
break;
|
|
case 2 /* step */:
|
|
field.get(&step_);
|
|
break;
|
|
case 3 /* frame_tree_node_id */:
|
|
field.get(&frame_tree_node_id_);
|
|
break;
|
|
case 4 /* component_info */:
|
|
component_info_.emplace_back();
|
|
component_info_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 5 /* is_coalesced */:
|
|
field.get(&is_coalesced_);
|
|
break;
|
|
case 6 /* gesture_scroll_id */:
|
|
field.get(&gesture_scroll_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeLatencyInfo::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeLatencyInfo::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
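// Serialization mirrors parsing: scalar fields are appended as varints when
// their presence bit is set, each ComponentInfo is written as a nested
// message on field 4, and previously captured unknown fields are appended
// verbatim at the end.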
void ChromeLatencyInfo::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, trace_id_);
|
|
}
|
|
|
|
// Field 2: step
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, step_);
|
|
}
|
|
|
|
// Field 3: frame_tree_node_id
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, frame_tree_node_id_);
|
|
}
|
|
|
|
// Field 4: component_info
|
|
for (auto& it : component_info_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
|
|
}
|
|
|
|
// Field 5: is_coalesced
|
|
if (_has_field_[5]) {
|
|
msg->AppendTinyVarInt(5, is_coalesced_);
|
|
}
|
|
|
|
// Field 6: gesture_scroll_id
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, gesture_scroll_id_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ChromeLatencyInfo_ComponentInfo::ChromeLatencyInfo_ComponentInfo() = default;
|
|
ChromeLatencyInfo_ComponentInfo::~ChromeLatencyInfo_ComponentInfo() = default;
|
|
ChromeLatencyInfo_ComponentInfo::ChromeLatencyInfo_ComponentInfo(const ChromeLatencyInfo_ComponentInfo&) = default;
|
|
ChromeLatencyInfo_ComponentInfo& ChromeLatencyInfo_ComponentInfo::operator=(const ChromeLatencyInfo_ComponentInfo&) = default;
|
|
ChromeLatencyInfo_ComponentInfo::ChromeLatencyInfo_ComponentInfo(ChromeLatencyInfo_ComponentInfo&&) noexcept = default;
|
|
ChromeLatencyInfo_ComponentInfo& ChromeLatencyInfo_ComponentInfo::operator=(ChromeLatencyInfo_ComponentInfo&&) = default;
|
|
|
|
bool ChromeLatencyInfo_ComponentInfo::operator==(const ChromeLatencyInfo_ComponentInfo& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& component_type_ == other.component_type_
|
|
&& time_us_ == other.time_us_;
|
|
}
|
|
|
|
bool ChromeLatencyInfo_ComponentInfo::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* component_type */:
|
|
field.get(&component_type_);
|
|
break;
|
|
case 2 /* time_us */:
|
|
field.get(&time_us_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeLatencyInfo_ComponentInfo::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeLatencyInfo_ComponentInfo::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeLatencyInfo_ComponentInfo::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: component_type
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, component_type_);
|
|
}
|
|
|
|
// Field 2: time_us
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, time_us_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LEGACY_IPC_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LEGACY_IPC_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ChromeLegacyIpc;
|
|
enum ChromeLegacyIpc_MessageClass : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum ChromeLegacyIpc_MessageClass : int {
|
|
ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED = 0,
|
|
ChromeLegacyIpc_MessageClass_CLASS_AUTOMATION = 1,
|
|
ChromeLegacyIpc_MessageClass_CLASS_FRAME = 2,
|
|
ChromeLegacyIpc_MessageClass_CLASS_PAGE = 3,
|
|
ChromeLegacyIpc_MessageClass_CLASS_VIEW = 4,
|
|
ChromeLegacyIpc_MessageClass_CLASS_WIDGET = 5,
|
|
ChromeLegacyIpc_MessageClass_CLASS_INPUT = 6,
|
|
ChromeLegacyIpc_MessageClass_CLASS_TEST = 7,
|
|
ChromeLegacyIpc_MessageClass_CLASS_WORKER = 8,
|
|
ChromeLegacyIpc_MessageClass_CLASS_NACL = 9,
|
|
ChromeLegacyIpc_MessageClass_CLASS_GPU_CHANNEL = 10,
|
|
ChromeLegacyIpc_MessageClass_CLASS_MEDIA = 11,
|
|
ChromeLegacyIpc_MessageClass_CLASS_PPAPI = 12,
|
|
ChromeLegacyIpc_MessageClass_CLASS_CHROME = 13,
|
|
ChromeLegacyIpc_MessageClass_CLASS_DRAG = 14,
|
|
ChromeLegacyIpc_MessageClass_CLASS_PRINT = 15,
|
|
ChromeLegacyIpc_MessageClass_CLASS_EXTENSION = 16,
|
|
ChromeLegacyIpc_MessageClass_CLASS_TEXT_INPUT_CLIENT = 17,
|
|
ChromeLegacyIpc_MessageClass_CLASS_BLINK_TEST = 18,
|
|
ChromeLegacyIpc_MessageClass_CLASS_ACCESSIBILITY = 19,
|
|
ChromeLegacyIpc_MessageClass_CLASS_PRERENDER = 20,
|
|
ChromeLegacyIpc_MessageClass_CLASS_CHROMOTING = 21,
|
|
ChromeLegacyIpc_MessageClass_CLASS_BROWSER_PLUGIN = 22,
|
|
ChromeLegacyIpc_MessageClass_CLASS_ANDROID_WEB_VIEW = 23,
|
|
ChromeLegacyIpc_MessageClass_CLASS_NACL_HOST = 24,
|
|
ChromeLegacyIpc_MessageClass_CLASS_ENCRYPTED_MEDIA = 25,
|
|
ChromeLegacyIpc_MessageClass_CLASS_CAST = 26,
|
|
ChromeLegacyIpc_MessageClass_CLASS_GIN_JAVA_BRIDGE = 27,
|
|
ChromeLegacyIpc_MessageClass_CLASS_CHROME_UTILITY_PRINTING = 28,
|
|
ChromeLegacyIpc_MessageClass_CLASS_OZONE_GPU = 29,
|
|
ChromeLegacyIpc_MessageClass_CLASS_WEB_TEST = 30,
|
|
ChromeLegacyIpc_MessageClass_CLASS_NETWORK_HINTS = 31,
|
|
ChromeLegacyIpc_MessageClass_CLASS_EXTENSIONS_GUEST_VIEW = 32,
|
|
ChromeLegacyIpc_MessageClass_CLASS_GUEST_VIEW = 33,
|
|
ChromeLegacyIpc_MessageClass_CLASS_MEDIA_PLAYER_DELEGATE = 34,
|
|
ChromeLegacyIpc_MessageClass_CLASS_EXTENSION_WORKER = 35,
|
|
ChromeLegacyIpc_MessageClass_CLASS_SUBRESOURCE_FILTER = 36,
|
|
ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME = 37,
|
|
};
|
|
|
|
class PERFETTO_EXPORT ChromeLegacyIpc : public ::protozero::CppMessageObj {
|
|
public:
|
|
using MessageClass = ChromeLegacyIpc_MessageClass;
|
|
static constexpr auto CLASS_UNSPECIFIED = ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED;
|
|
static constexpr auto CLASS_AUTOMATION = ChromeLegacyIpc_MessageClass_CLASS_AUTOMATION;
|
|
static constexpr auto CLASS_FRAME = ChromeLegacyIpc_MessageClass_CLASS_FRAME;
|
|
static constexpr auto CLASS_PAGE = ChromeLegacyIpc_MessageClass_CLASS_PAGE;
|
|
static constexpr auto CLASS_VIEW = ChromeLegacyIpc_MessageClass_CLASS_VIEW;
|
|
static constexpr auto CLASS_WIDGET = ChromeLegacyIpc_MessageClass_CLASS_WIDGET;
|
|
static constexpr auto CLASS_INPUT = ChromeLegacyIpc_MessageClass_CLASS_INPUT;
|
|
static constexpr auto CLASS_TEST = ChromeLegacyIpc_MessageClass_CLASS_TEST;
|
|
static constexpr auto CLASS_WORKER = ChromeLegacyIpc_MessageClass_CLASS_WORKER;
|
|
static constexpr auto CLASS_NACL = ChromeLegacyIpc_MessageClass_CLASS_NACL;
|
|
static constexpr auto CLASS_GPU_CHANNEL = ChromeLegacyIpc_MessageClass_CLASS_GPU_CHANNEL;
|
|
static constexpr auto CLASS_MEDIA = ChromeLegacyIpc_MessageClass_CLASS_MEDIA;
|
|
static constexpr auto CLASS_PPAPI = ChromeLegacyIpc_MessageClass_CLASS_PPAPI;
|
|
static constexpr auto CLASS_CHROME = ChromeLegacyIpc_MessageClass_CLASS_CHROME;
|
|
static constexpr auto CLASS_DRAG = ChromeLegacyIpc_MessageClass_CLASS_DRAG;
|
|
static constexpr auto CLASS_PRINT = ChromeLegacyIpc_MessageClass_CLASS_PRINT;
|
|
static constexpr auto CLASS_EXTENSION = ChromeLegacyIpc_MessageClass_CLASS_EXTENSION;
|
|
static constexpr auto CLASS_TEXT_INPUT_CLIENT = ChromeLegacyIpc_MessageClass_CLASS_TEXT_INPUT_CLIENT;
|
|
static constexpr auto CLASS_BLINK_TEST = ChromeLegacyIpc_MessageClass_CLASS_BLINK_TEST;
|
|
static constexpr auto CLASS_ACCESSIBILITY = ChromeLegacyIpc_MessageClass_CLASS_ACCESSIBILITY;
|
|
static constexpr auto CLASS_PRERENDER = ChromeLegacyIpc_MessageClass_CLASS_PRERENDER;
|
|
static constexpr auto CLASS_CHROMOTING = ChromeLegacyIpc_MessageClass_CLASS_CHROMOTING;
|
|
static constexpr auto CLASS_BROWSER_PLUGIN = ChromeLegacyIpc_MessageClass_CLASS_BROWSER_PLUGIN;
|
|
static constexpr auto CLASS_ANDROID_WEB_VIEW = ChromeLegacyIpc_MessageClass_CLASS_ANDROID_WEB_VIEW;
|
|
static constexpr auto CLASS_NACL_HOST = ChromeLegacyIpc_MessageClass_CLASS_NACL_HOST;
|
|
static constexpr auto CLASS_ENCRYPTED_MEDIA = ChromeLegacyIpc_MessageClass_CLASS_ENCRYPTED_MEDIA;
|
|
static constexpr auto CLASS_CAST = ChromeLegacyIpc_MessageClass_CLASS_CAST;
|
|
static constexpr auto CLASS_GIN_JAVA_BRIDGE = ChromeLegacyIpc_MessageClass_CLASS_GIN_JAVA_BRIDGE;
|
|
static constexpr auto CLASS_CHROME_UTILITY_PRINTING = ChromeLegacyIpc_MessageClass_CLASS_CHROME_UTILITY_PRINTING;
|
|
static constexpr auto CLASS_OZONE_GPU = ChromeLegacyIpc_MessageClass_CLASS_OZONE_GPU;
|
|
static constexpr auto CLASS_WEB_TEST = ChromeLegacyIpc_MessageClass_CLASS_WEB_TEST;
|
|
static constexpr auto CLASS_NETWORK_HINTS = ChromeLegacyIpc_MessageClass_CLASS_NETWORK_HINTS;
|
|
static constexpr auto CLASS_EXTENSIONS_GUEST_VIEW = ChromeLegacyIpc_MessageClass_CLASS_EXTENSIONS_GUEST_VIEW;
|
|
static constexpr auto CLASS_GUEST_VIEW = ChromeLegacyIpc_MessageClass_CLASS_GUEST_VIEW;
|
|
static constexpr auto CLASS_MEDIA_PLAYER_DELEGATE = ChromeLegacyIpc_MessageClass_CLASS_MEDIA_PLAYER_DELEGATE;
|
|
static constexpr auto CLASS_EXTENSION_WORKER = ChromeLegacyIpc_MessageClass_CLASS_EXTENSION_WORKER;
|
|
static constexpr auto CLASS_SUBRESOURCE_FILTER = ChromeLegacyIpc_MessageClass_CLASS_SUBRESOURCE_FILTER;
|
|
static constexpr auto CLASS_UNFREEZABLE_FRAME = ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME;
|
|
static constexpr auto MessageClass_MIN = ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED;
|
|
static constexpr auto MessageClass_MAX = ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME;
|
|
enum FieldNumbers {
|
|
kMessageClassFieldNumber = 1,
|
|
kMessageLineFieldNumber = 2,
|
|
};
|
|
|
|
ChromeLegacyIpc();
|
|
~ChromeLegacyIpc() override;
|
|
ChromeLegacyIpc(ChromeLegacyIpc&&) noexcept;
|
|
ChromeLegacyIpc& operator=(ChromeLegacyIpc&&);
|
|
ChromeLegacyIpc(const ChromeLegacyIpc&);
|
|
ChromeLegacyIpc& operator=(const ChromeLegacyIpc&);
|
|
bool operator==(const ChromeLegacyIpc&) const;
|
|
bool operator!=(const ChromeLegacyIpc& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_message_class() const { return _has_field_[1]; }
|
|
ChromeLegacyIpc_MessageClass message_class() const { return message_class_; }
|
|
void set_message_class(ChromeLegacyIpc_MessageClass value) { message_class_ = value; _has_field_.set(1); }
|
|
|
|
bool has_message_line() const { return _has_field_[2]; }
|
|
uint32_t message_line() const { return message_line_; }
|
|
void set_message_line(uint32_t value) { message_line_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
ChromeLegacyIpc_MessageClass message_class_{};
|
|
uint32_t message_line_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LEGACY_IPC_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeLegacyIpc::ChromeLegacyIpc() = default;
|
|
ChromeLegacyIpc::~ChromeLegacyIpc() = default;
|
|
ChromeLegacyIpc::ChromeLegacyIpc(const ChromeLegacyIpc&) = default;
|
|
ChromeLegacyIpc& ChromeLegacyIpc::operator=(const ChromeLegacyIpc&) = default;
|
|
ChromeLegacyIpc::ChromeLegacyIpc(ChromeLegacyIpc&&) noexcept = default;
|
|
ChromeLegacyIpc& ChromeLegacyIpc::operator=(ChromeLegacyIpc&&) = default;
|
|
|
|
bool ChromeLegacyIpc::operator==(const ChromeLegacyIpc& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& message_class_ == other.message_class_
|
|
&& message_line_ == other.message_line_;
|
|
}
|
|
|
|
bool ChromeLegacyIpc::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* message_class */:
|
|
field.get(&message_class_);
|
|
break;
|
|
case 2 /* message_line */:
|
|
field.get(&message_line_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeLegacyIpc::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeLegacyIpc::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeLegacyIpc::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: message_class
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, message_class_);
|
|
}
|
|
|
|
// Field 2: message_line
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, message_line_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_process_descriptor.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_process_descriptor.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_PROCESS_DESCRIPTOR_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_PROCESS_DESCRIPTOR_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ChromeProcessDescriptor;
|
|
enum ChromeProcessDescriptor_ProcessType : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum ChromeProcessDescriptor_ProcessType : int {
|
|
ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED = 0,
|
|
ChromeProcessDescriptor_ProcessType_PROCESS_BROWSER = 1,
|
|
ChromeProcessDescriptor_ProcessType_PROCESS_RENDERER = 2,
|
|
ChromeProcessDescriptor_ProcessType_PROCESS_UTILITY = 3,
|
|
ChromeProcessDescriptor_ProcessType_PROCESS_ZYGOTE = 4,
|
|
ChromeProcessDescriptor_ProcessType_PROCESS_SANDBOX_HELPER = 5,
|
|
ChromeProcessDescriptor_ProcessType_PROCESS_GPU = 6,
|
|
ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_PLUGIN = 7,
|
|
ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_BROKER = 8,
|
|
};
|
|
|
|
class PERFETTO_EXPORT ChromeProcessDescriptor : public ::protozero::CppMessageObj {
|
|
public:
|
|
using ProcessType = ChromeProcessDescriptor_ProcessType;
|
|
static constexpr auto PROCESS_UNSPECIFIED = ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED;
|
|
static constexpr auto PROCESS_BROWSER = ChromeProcessDescriptor_ProcessType_PROCESS_BROWSER;
|
|
static constexpr auto PROCESS_RENDERER = ChromeProcessDescriptor_ProcessType_PROCESS_RENDERER;
|
|
static constexpr auto PROCESS_UTILITY = ChromeProcessDescriptor_ProcessType_PROCESS_UTILITY;
|
|
static constexpr auto PROCESS_ZYGOTE = ChromeProcessDescriptor_ProcessType_PROCESS_ZYGOTE;
|
|
static constexpr auto PROCESS_SANDBOX_HELPER = ChromeProcessDescriptor_ProcessType_PROCESS_SANDBOX_HELPER;
|
|
static constexpr auto PROCESS_GPU = ChromeProcessDescriptor_ProcessType_PROCESS_GPU;
|
|
static constexpr auto PROCESS_PPAPI_PLUGIN = ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_PLUGIN;
|
|
static constexpr auto PROCESS_PPAPI_BROKER = ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_BROKER;
|
|
static constexpr auto ProcessType_MIN = ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED;
|
|
static constexpr auto ProcessType_MAX = ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_BROKER;
|
|
enum FieldNumbers {
|
|
kProcessTypeFieldNumber = 1,
|
|
kProcessPriorityFieldNumber = 2,
|
|
kLegacySortIndexFieldNumber = 3,
|
|
kHostAppPackageNameFieldNumber = 4,
|
|
};
|
|
|
|
ChromeProcessDescriptor();
|
|
~ChromeProcessDescriptor() override;
|
|
ChromeProcessDescriptor(ChromeProcessDescriptor&&) noexcept;
|
|
ChromeProcessDescriptor& operator=(ChromeProcessDescriptor&&);
|
|
ChromeProcessDescriptor(const ChromeProcessDescriptor&);
|
|
ChromeProcessDescriptor& operator=(const ChromeProcessDescriptor&);
|
|
bool operator==(const ChromeProcessDescriptor&) const;
|
|
bool operator!=(const ChromeProcessDescriptor& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_process_type() const { return _has_field_[1]; }
|
|
ChromeProcessDescriptor_ProcessType process_type() const { return process_type_; }
|
|
void set_process_type(ChromeProcessDescriptor_ProcessType value) { process_type_ = value; _has_field_.set(1); }
|
|
|
|
bool has_process_priority() const { return _has_field_[2]; }
|
|
int32_t process_priority() const { return process_priority_; }
|
|
void set_process_priority(int32_t value) { process_priority_ = value; _has_field_.set(2); }
|
|
|
|
bool has_legacy_sort_index() const { return _has_field_[3]; }
|
|
int32_t legacy_sort_index() const { return legacy_sort_index_; }
|
|
void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(3); }
|
|
|
|
bool has_host_app_package_name() const { return _has_field_[4]; }
|
|
const std::string& host_app_package_name() const { return host_app_package_name_; }
|
|
void set_host_app_package_name(const std::string& value) { host_app_package_name_ = value; _has_field_.set(4); }
|
|
|
|
private:
|
|
ChromeProcessDescriptor_ProcessType process_type_{};
|
|
int32_t process_priority_{};
|
|
int32_t legacy_sort_index_{};
|
|
std::string host_app_package_name_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<5> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_PROCESS_DESCRIPTOR_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_process_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeProcessDescriptor::ChromeProcessDescriptor() = default;
|
|
ChromeProcessDescriptor::~ChromeProcessDescriptor() = default;
|
|
ChromeProcessDescriptor::ChromeProcessDescriptor(const ChromeProcessDescriptor&) = default;
|
|
ChromeProcessDescriptor& ChromeProcessDescriptor::operator=(const ChromeProcessDescriptor&) = default;
|
|
ChromeProcessDescriptor::ChromeProcessDescriptor(ChromeProcessDescriptor&&) noexcept = default;
|
|
ChromeProcessDescriptor& ChromeProcessDescriptor::operator=(ChromeProcessDescriptor&&) = default;
|
|
|
|
bool ChromeProcessDescriptor::operator==(const ChromeProcessDescriptor& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& process_type_ == other.process_type_
|
|
&& process_priority_ == other.process_priority_
|
|
&& legacy_sort_index_ == other.legacy_sort_index_
|
|
&& host_app_package_name_ == other.host_app_package_name_;
|
|
}
|
|
|
|
bool ChromeProcessDescriptor::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* process_type */:
|
|
field.get(&process_type_);
|
|
break;
|
|
case 2 /* process_priority */:
|
|
field.get(&process_priority_);
|
|
break;
|
|
case 3 /* legacy_sort_index */:
|
|
field.get(&legacy_sort_index_);
|
|
break;
|
|
case 4 /* host_app_package_name */:
|
|
field.get(&host_app_package_name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeProcessDescriptor::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeProcessDescriptor::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeProcessDescriptor::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: process_type
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, process_type_);
|
|
}
|
|
|
|
// Field 2: process_priority
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, process_priority_);
|
|
}
|
|
|
|
// Field 3: legacy_sort_index
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, legacy_sort_index_);
|
|
}
|
|
|
|
// Field 4: host_app_package_name
|
|
if (_has_field_[4]) {
|
|
msg->AppendString(4, host_app_package_name_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_THREAD_DESCRIPTOR_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_THREAD_DESCRIPTOR_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ChromeThreadDescriptor;
|
|
enum ChromeThreadDescriptor_ThreadType : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum ChromeThreadDescriptor_ThreadType : int {
|
|
ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED = 0,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_MAIN = 1,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_IO = 2,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_WORKER = 3,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_WORKER = 4,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_BLOCKING = 6,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_BLOCKING = 5,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_POOL_SERVICE = 7,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR = 8,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_VIZ_COMPOSITOR = 9,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR_WORKER = 10,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_SERVICE_WORKER = 11,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_MEMORY_INFRA = 50,
|
|
ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER = 51,
|
|
};
|
|
|
|
class PERFETTO_EXPORT ChromeThreadDescriptor : public ::protozero::CppMessageObj {
|
|
public:
|
|
using ThreadType = ChromeThreadDescriptor_ThreadType;
|
|
static constexpr auto THREAD_UNSPECIFIED = ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED;
|
|
static constexpr auto THREAD_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_MAIN;
|
|
static constexpr auto THREAD_IO = ChromeThreadDescriptor_ThreadType_THREAD_IO;
|
|
static constexpr auto THREAD_POOL_BG_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_WORKER;
|
|
static constexpr auto THREAD_POOL_FG_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_WORKER;
|
|
static constexpr auto THREAD_POOL_BG_BLOCKING = ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_BLOCKING;
|
|
static constexpr auto THREAD_POOL_FG_BLOCKING = ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_BLOCKING;
|
|
static constexpr auto THREAD_POOL_SERVICE = ChromeThreadDescriptor_ThreadType_THREAD_POOL_SERVICE;
|
|
static constexpr auto THREAD_COMPOSITOR = ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR;
|
|
static constexpr auto THREAD_VIZ_COMPOSITOR = ChromeThreadDescriptor_ThreadType_THREAD_VIZ_COMPOSITOR;
|
|
static constexpr auto THREAD_COMPOSITOR_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR_WORKER;
|
|
static constexpr auto THREAD_SERVICE_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_SERVICE_WORKER;
|
|
static constexpr auto THREAD_MEMORY_INFRA = ChromeThreadDescriptor_ThreadType_THREAD_MEMORY_INFRA;
|
|
static constexpr auto THREAD_SAMPLING_PROFILER = ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER;
|
|
static constexpr auto ThreadType_MIN = ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED;
|
|
static constexpr auto ThreadType_MAX = ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER;
|
|
enum FieldNumbers {
|
|
kThreadTypeFieldNumber = 1,
|
|
kLegacySortIndexFieldNumber = 2,
|
|
};
|
|
|
|
ChromeThreadDescriptor();
|
|
~ChromeThreadDescriptor() override;
|
|
ChromeThreadDescriptor(ChromeThreadDescriptor&&) noexcept;
|
|
ChromeThreadDescriptor& operator=(ChromeThreadDescriptor&&);
|
|
ChromeThreadDescriptor(const ChromeThreadDescriptor&);
|
|
ChromeThreadDescriptor& operator=(const ChromeThreadDescriptor&);
|
|
bool operator==(const ChromeThreadDescriptor&) const;
|
|
bool operator!=(const ChromeThreadDescriptor& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_thread_type() const { return _has_field_[1]; }
|
|
ChromeThreadDescriptor_ThreadType thread_type() const { return thread_type_; }
|
|
void set_thread_type(ChromeThreadDescriptor_ThreadType value) { thread_type_ = value; _has_field_.set(1); }
|
|
|
|
bool has_legacy_sort_index() const { return _has_field_[2]; }
|
|
int32_t legacy_sort_index() const { return legacy_sort_index_; }
|
|
void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
ChromeThreadDescriptor_ThreadType thread_type_{};
|
|
int32_t legacy_sort_index_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_THREAD_DESCRIPTOR_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeThreadDescriptor::ChromeThreadDescriptor() = default;
|
|
ChromeThreadDescriptor::~ChromeThreadDescriptor() = default;
|
|
ChromeThreadDescriptor::ChromeThreadDescriptor(const ChromeThreadDescriptor&) = default;
|
|
ChromeThreadDescriptor& ChromeThreadDescriptor::operator=(const ChromeThreadDescriptor&) = default;
|
|
ChromeThreadDescriptor::ChromeThreadDescriptor(ChromeThreadDescriptor&&) noexcept = default;
|
|
ChromeThreadDescriptor& ChromeThreadDescriptor::operator=(ChromeThreadDescriptor&&) = default;
|
|
|
|
bool ChromeThreadDescriptor::operator==(const ChromeThreadDescriptor& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& thread_type_ == other.thread_type_
|
|
&& legacy_sort_index_ == other.legacy_sort_index_;
|
|
}
|
|
|
|
bool ChromeThreadDescriptor::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* thread_type */:
|
|
field.get(&thread_type_);
|
|
break;
|
|
case 2 /* legacy_sort_index */:
|
|
field.get(&legacy_sort_index_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeThreadDescriptor::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeThreadDescriptor::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeThreadDescriptor::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: thread_type
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, thread_type_);
|
|
}
|
|
|
|
// Field 2: legacy_sort_index
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, legacy_sort_index_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
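
// Illustrative sketch (not part of the generated output): building a
// ChromeThreadDescriptor with the setters declared above and round-tripping it
// through SerializeAsArray()/ParseFromArray(). Kept under #if 0 so it does not
// affect this translation unit.
#if 0
static void ExampleChromeThreadDescriptor() {
  using perfetto::protos::gen::ChromeThreadDescriptor;
  ChromeThreadDescriptor desc;
  desc.set_thread_type(ChromeThreadDescriptor::THREAD_IO);  // field 1, varint.
  desc.set_legacy_sort_index(-1);                           // field 2, varint.

  std::vector<uint8_t> bytes = desc.SerializeAsArray();
  ChromeThreadDescriptor decoded;
  bool ok = decoded.ParseFromArray(bytes.data(), bytes.size());
  // ok && decoded == desc: set fields and (empty) unknown fields compare equal.
  (void)ok;
}
#endif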
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_user_event.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_user_event.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_USER_EVENT_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_USER_EVENT_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ChromeUserEvent;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT ChromeUserEvent : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kActionFieldNumber = 1,
|
|
kActionHashFieldNumber = 2,
|
|
};
|
|
|
|
ChromeUserEvent();
|
|
~ChromeUserEvent() override;
|
|
ChromeUserEvent(ChromeUserEvent&&) noexcept;
|
|
ChromeUserEvent& operator=(ChromeUserEvent&&);
|
|
ChromeUserEvent(const ChromeUserEvent&);
|
|
ChromeUserEvent& operator=(const ChromeUserEvent&);
|
|
bool operator==(const ChromeUserEvent&) const;
|
|
bool operator!=(const ChromeUserEvent& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_action() const { return _has_field_[1]; }
|
|
const std::string& action() const { return action_; }
|
|
void set_action(const std::string& value) { action_ = value; _has_field_.set(1); }
|
|
|
|
bool has_action_hash() const { return _has_field_[2]; }
|
|
uint64_t action_hash() const { return action_hash_; }
|
|
void set_action_hash(uint64_t value) { action_hash_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
std::string action_{};
|
|
uint64_t action_hash_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_USER_EVENT_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_user_event.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeUserEvent::ChromeUserEvent() = default;
|
|
ChromeUserEvent::~ChromeUserEvent() = default;
|
|
ChromeUserEvent::ChromeUserEvent(const ChromeUserEvent&) = default;
|
|
ChromeUserEvent& ChromeUserEvent::operator=(const ChromeUserEvent&) = default;
|
|
ChromeUserEvent::ChromeUserEvent(ChromeUserEvent&&) noexcept = default;
|
|
ChromeUserEvent& ChromeUserEvent::operator=(ChromeUserEvent&&) = default;
|
|
|
|
bool ChromeUserEvent::operator==(const ChromeUserEvent& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& action_ == other.action_
|
|
&& action_hash_ == other.action_hash_;
|
|
}
|
|
|
|
bool ChromeUserEvent::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* action */:
|
|
field.get(&action_);
|
|
break;
|
|
case 2 /* action_hash */:
|
|
field.get(&action_hash_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeUserEvent::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeUserEvent::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeUserEvent::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: action
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, action_);
|
|
}
|
|
|
|
// Field 2: action_hash
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, action_hash_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
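
// Illustrative sketch (not part of the generated output): the ChromeUserEvent
// accessors above, exercised through SerializeAsString(), which is convenient
// when the encoded message is embedded into another std::string. Kept under
// #if 0 so it does not affect this translation unit.
#if 0
static void ExampleChromeUserEvent() {
  perfetto::protos::gen::ChromeUserEvent event;
  event.set_action("NewTab");          // field 1, length-delimited string.
  event.set_action_hash(0x1234abcdu);  // field 2, varint.

  std::string encoded = event.SerializeAsString();
  perfetto::protos::gen::ChromeUserEvent decoded;
  bool ok = decoded.ParseFromArray(encoded.data(), encoded.size());
  (void)ok;  // ok && decoded.action() == "NewTab".
}
#endif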
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/counter_descriptor.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/counter_descriptor.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class CounterDescriptor;
|
|
enum CounterDescriptor_BuiltinCounterType : int;
|
|
enum CounterDescriptor_Unit : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum CounterDescriptor_BuiltinCounterType : int {
|
|
CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED = 0,
|
|
CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_TIME_NS = 1,
|
|
CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT = 2,
|
|
};
|
|
enum CounterDescriptor_Unit : int {
|
|
CounterDescriptor_Unit_UNIT_UNSPECIFIED = 0,
|
|
CounterDescriptor_Unit_UNIT_TIME_NS = 1,
|
|
CounterDescriptor_Unit_UNIT_COUNT = 2,
|
|
CounterDescriptor_Unit_UNIT_SIZE_BYTES = 3,
|
|
};
|
|
|
|
class PERFETTO_EXPORT CounterDescriptor : public ::protozero::CppMessageObj {
|
|
public:
|
|
using BuiltinCounterType = CounterDescriptor_BuiltinCounterType;
|
|
static constexpr auto COUNTER_UNSPECIFIED = CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED;
|
|
static constexpr auto COUNTER_THREAD_TIME_NS = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_TIME_NS;
|
|
static constexpr auto COUNTER_THREAD_INSTRUCTION_COUNT = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT;
|
|
static constexpr auto BuiltinCounterType_MIN = CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED;
|
|
static constexpr auto BuiltinCounterType_MAX = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT;
|
|
using Unit = CounterDescriptor_Unit;
|
|
static constexpr auto UNIT_UNSPECIFIED = CounterDescriptor_Unit_UNIT_UNSPECIFIED;
|
|
static constexpr auto UNIT_TIME_NS = CounterDescriptor_Unit_UNIT_TIME_NS;
|
|
static constexpr auto UNIT_COUNT = CounterDescriptor_Unit_UNIT_COUNT;
|
|
static constexpr auto UNIT_SIZE_BYTES = CounterDescriptor_Unit_UNIT_SIZE_BYTES;
|
|
static constexpr auto Unit_MIN = CounterDescriptor_Unit_UNIT_UNSPECIFIED;
|
|
static constexpr auto Unit_MAX = CounterDescriptor_Unit_UNIT_SIZE_BYTES;
|
|
enum FieldNumbers {
|
|
kTypeFieldNumber = 1,
|
|
kCategoriesFieldNumber = 2,
|
|
kUnitFieldNumber = 3,
|
|
kUnitMultiplierFieldNumber = 4,
|
|
kIsIncrementalFieldNumber = 5,
|
|
};
|
|
|
|
CounterDescriptor();
|
|
~CounterDescriptor() override;
|
|
CounterDescriptor(CounterDescriptor&&) noexcept;
|
|
CounterDescriptor& operator=(CounterDescriptor&&);
|
|
CounterDescriptor(const CounterDescriptor&);
|
|
CounterDescriptor& operator=(const CounterDescriptor&);
|
|
bool operator==(const CounterDescriptor&) const;
|
|
bool operator!=(const CounterDescriptor& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_type() const { return _has_field_[1]; }
|
|
CounterDescriptor_BuiltinCounterType type() const { return type_; }
|
|
void set_type(CounterDescriptor_BuiltinCounterType value) { type_ = value; _has_field_.set(1); }
|
|
|
|
int categories_size() const { return static_cast<int>(categories_.size()); }
|
|
const std::vector<std::string>& categories() const { return categories_; }
|
|
std::vector<std::string>* mutable_categories() { return &categories_; }
|
|
void clear_categories() { categories_.clear(); }
|
|
void add_categories(std::string value) { categories_.emplace_back(value); }
|
|
std::string* add_categories() { categories_.emplace_back(); return &categories_.back(); }
|
|
|
|
bool has_unit() const { return _has_field_[3]; }
|
|
CounterDescriptor_Unit unit() const { return unit_; }
|
|
void set_unit(CounterDescriptor_Unit value) { unit_ = value; _has_field_.set(3); }
|
|
|
|
bool has_unit_multiplier() const { return _has_field_[4]; }
|
|
int64_t unit_multiplier() const { return unit_multiplier_; }
|
|
void set_unit_multiplier(int64_t value) { unit_multiplier_ = value; _has_field_.set(4); }
|
|
|
|
bool has_is_incremental() const { return _has_field_[5]; }
|
|
bool is_incremental() const { return is_incremental_; }
|
|
void set_is_incremental(bool value) { is_incremental_ = value; _has_field_.set(5); }
|
|
|
|
private:
|
|
CounterDescriptor_BuiltinCounterType type_{};
|
|
std::vector<std::string> categories_;
|
|
CounterDescriptor_Unit unit_{};
|
|
int64_t unit_multiplier_{};
|
|
bool is_incremental_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<6> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/counter_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
CounterDescriptor::CounterDescriptor() = default;
|
|
CounterDescriptor::~CounterDescriptor() = default;
|
|
CounterDescriptor::CounterDescriptor(const CounterDescriptor&) = default;
|
|
CounterDescriptor& CounterDescriptor::operator=(const CounterDescriptor&) = default;
|
|
CounterDescriptor::CounterDescriptor(CounterDescriptor&&) noexcept = default;
|
|
CounterDescriptor& CounterDescriptor::operator=(CounterDescriptor&&) = default;
|
|
|
|
bool CounterDescriptor::operator==(const CounterDescriptor& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& type_ == other.type_
|
|
&& categories_ == other.categories_
|
|
&& unit_ == other.unit_
|
|
&& unit_multiplier_ == other.unit_multiplier_
|
|
&& is_incremental_ == other.is_incremental_;
|
|
}
|
|
|
|
bool CounterDescriptor::ParseFromArray(const void* raw, size_t size) {
|
|
categories_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* type */:
|
|
field.get(&type_);
|
|
break;
|
|
case 2 /* categories */:
|
|
categories_.emplace_back();
|
|
field.get(&categories_.back());
|
|
break;
|
|
case 3 /* unit */:
|
|
field.get(&unit_);
|
|
break;
|
|
case 4 /* unit_multiplier */:
|
|
field.get(&unit_multiplier_);
|
|
break;
|
|
case 5 /* is_incremental */:
|
|
field.get(&is_incremental_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string CounterDescriptor::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> CounterDescriptor::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void CounterDescriptor::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: type
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, type_);
|
|
}
|
|
|
|
// Field 2: categories
|
|
for (auto& it : categories_) {
|
|
msg->AppendString(2, it);
|
|
}
|
|
|
|
// Field 3: unit
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, unit_);
|
|
}
|
|
|
|
// Field 4: unit_multiplier
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, unit_multiplier_);
|
|
}
|
|
|
|
// Field 5: is_incremental
|
|
if (_has_field_[5]) {
|
|
msg->AppendTinyVarInt(5, is_incremental_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
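
// Illustrative sketch (not part of the generated output): describing an
// incremental thread-time counter with the enum aliases and repeated-field
// helpers declared above. Kept under #if 0 so it does not affect this
// translation unit.
#if 0
static void ExampleCounterDescriptor() {
  using perfetto::protos::gen::CounterDescriptor;
  CounterDescriptor counter;
  counter.set_type(CounterDescriptor::COUNTER_THREAD_TIME_NS);  // field 1.
  counter.set_unit(CounterDescriptor::UNIT_TIME_NS);            // field 3.
  counter.set_unit_multiplier(1000);  // scale factor relative to unit; exact
                                      // semantics are defined in counter_descriptor.proto.
  counter.set_is_incremental(true);     // field 5, boolean.
  counter.add_categories("scheduler");  // field 2, repeated string.

  std::vector<uint8_t> bytes = counter.SerializeAsArray();
  (void)bytes;
}
#endif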
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/debug_annotation.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/debug_annotation.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class DebugAnnotationName;
|
|
class DebugAnnotation;
|
|
class DebugAnnotation_NestedValue;
|
|
enum DebugAnnotation_NestedValue_NestedType : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum DebugAnnotation_NestedValue_NestedType : int {
|
|
DebugAnnotation_NestedValue_NestedType_UNSPECIFIED = 0,
|
|
DebugAnnotation_NestedValue_NestedType_DICT = 1,
|
|
DebugAnnotation_NestedValue_NestedType_ARRAY = 2,
|
|
};
|
|
|
|
class PERFETTO_EXPORT DebugAnnotationName : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kIidFieldNumber = 1,
|
|
kNameFieldNumber = 2,
|
|
};
|
|
|
|
DebugAnnotationName();
|
|
~DebugAnnotationName() override;
|
|
DebugAnnotationName(DebugAnnotationName&&) noexcept;
|
|
DebugAnnotationName& operator=(DebugAnnotationName&&);
|
|
DebugAnnotationName(const DebugAnnotationName&);
|
|
DebugAnnotationName& operator=(const DebugAnnotationName&);
|
|
bool operator==(const DebugAnnotationName&) const;
|
|
bool operator!=(const DebugAnnotationName& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_iid() const { return _has_field_[1]; }
|
|
uint64_t iid() const { return iid_; }
|
|
void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
|
|
|
|
bool has_name() const { return _has_field_[2]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
uint64_t iid_{};
|
|
std::string name_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT DebugAnnotation : public ::protozero::CppMessageObj {
|
|
public:
|
|
using NestedValue = DebugAnnotation_NestedValue;
|
|
enum FieldNumbers {
|
|
kNameIidFieldNumber = 1,
|
|
kNameFieldNumber = 10,
|
|
kBoolValueFieldNumber = 2,
|
|
kUintValueFieldNumber = 3,
|
|
kIntValueFieldNumber = 4,
|
|
kDoubleValueFieldNumber = 5,
|
|
kStringValueFieldNumber = 6,
|
|
kPointerValueFieldNumber = 7,
|
|
kNestedValueFieldNumber = 8,
|
|
kLegacyJsonValueFieldNumber = 9,
|
|
};
|
|
|
|
DebugAnnotation();
|
|
~DebugAnnotation() override;
|
|
DebugAnnotation(DebugAnnotation&&) noexcept;
|
|
DebugAnnotation& operator=(DebugAnnotation&&);
|
|
DebugAnnotation(const DebugAnnotation&);
|
|
DebugAnnotation& operator=(const DebugAnnotation&);
|
|
bool operator==(const DebugAnnotation&) const;
|
|
bool operator!=(const DebugAnnotation& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_name_iid() const { return _has_field_[1]; }
|
|
uint64_t name_iid() const { return name_iid_; }
|
|
void set_name_iid(uint64_t value) { name_iid_ = value; _has_field_.set(1); }
|
|
|
|
bool has_name() const { return _has_field_[10]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(10); }
|
|
|
|
bool has_bool_value() const { return _has_field_[2]; }
|
|
bool bool_value() const { return bool_value_; }
|
|
void set_bool_value(bool value) { bool_value_ = value; _has_field_.set(2); }
|
|
|
|
bool has_uint_value() const { return _has_field_[3]; }
|
|
uint64_t uint_value() const { return uint_value_; }
|
|
void set_uint_value(uint64_t value) { uint_value_ = value; _has_field_.set(3); }
|
|
|
|
bool has_int_value() const { return _has_field_[4]; }
|
|
int64_t int_value() const { return int_value_; }
|
|
void set_int_value(int64_t value) { int_value_ = value; _has_field_.set(4); }
|
|
|
|
bool has_double_value() const { return _has_field_[5]; }
|
|
double double_value() const { return double_value_; }
|
|
void set_double_value(double value) { double_value_ = value; _has_field_.set(5); }
|
|
|
|
bool has_string_value() const { return _has_field_[6]; }
|
|
const std::string& string_value() const { return string_value_; }
|
|
void set_string_value(const std::string& value) { string_value_ = value; _has_field_.set(6); }
|
|
|
|
bool has_pointer_value() const { return _has_field_[7]; }
|
|
uint64_t pointer_value() const { return pointer_value_; }
|
|
void set_pointer_value(uint64_t value) { pointer_value_ = value; _has_field_.set(7); }
|
|
|
|
bool has_nested_value() const { return _has_field_[8]; }
|
|
const DebugAnnotation_NestedValue& nested_value() const { return *nested_value_; }
|
|
DebugAnnotation_NestedValue* mutable_nested_value() { _has_field_.set(8); return nested_value_.get(); }
|
|
|
|
bool has_legacy_json_value() const { return _has_field_[9]; }
|
|
const std::string& legacy_json_value() const { return legacy_json_value_; }
|
|
void set_legacy_json_value(const std::string& value) { legacy_json_value_ = value; _has_field_.set(9); }
|
|
|
|
private:
|
|
uint64_t name_iid_{};
|
|
std::string name_{};
|
|
bool bool_value_{};
|
|
uint64_t uint_value_{};
|
|
int64_t int_value_{};
|
|
double double_value_{};
|
|
std::string string_value_{};
|
|
uint64_t pointer_value_{};
|
|
::protozero::CopyablePtr<DebugAnnotation_NestedValue> nested_value_;
|
|
std::string legacy_json_value_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<11> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT DebugAnnotation_NestedValue : public ::protozero::CppMessageObj {
|
|
public:
|
|
using NestedType = DebugAnnotation_NestedValue_NestedType;
|
|
static constexpr auto UNSPECIFIED = DebugAnnotation_NestedValue_NestedType_UNSPECIFIED;
|
|
static constexpr auto DICT = DebugAnnotation_NestedValue_NestedType_DICT;
|
|
static constexpr auto ARRAY = DebugAnnotation_NestedValue_NestedType_ARRAY;
|
|
static constexpr auto NestedType_MIN = DebugAnnotation_NestedValue_NestedType_UNSPECIFIED;
|
|
static constexpr auto NestedType_MAX = DebugAnnotation_NestedValue_NestedType_ARRAY;
|
|
enum FieldNumbers {
|
|
kNestedTypeFieldNumber = 1,
|
|
kDictKeysFieldNumber = 2,
|
|
kDictValuesFieldNumber = 3,
|
|
kArrayValuesFieldNumber = 4,
|
|
kIntValueFieldNumber = 5,
|
|
kDoubleValueFieldNumber = 6,
|
|
kBoolValueFieldNumber = 7,
|
|
kStringValueFieldNumber = 8,
|
|
};
|
|
|
|
DebugAnnotation_NestedValue();
|
|
~DebugAnnotation_NestedValue() override;
|
|
DebugAnnotation_NestedValue(DebugAnnotation_NestedValue&&) noexcept;
|
|
DebugAnnotation_NestedValue& operator=(DebugAnnotation_NestedValue&&);
|
|
DebugAnnotation_NestedValue(const DebugAnnotation_NestedValue&);
|
|
DebugAnnotation_NestedValue& operator=(const DebugAnnotation_NestedValue&);
|
|
bool operator==(const DebugAnnotation_NestedValue&) const;
|
|
bool operator!=(const DebugAnnotation_NestedValue& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_nested_type() const { return _has_field_[1]; }
|
|
DebugAnnotation_NestedValue_NestedType nested_type() const { return nested_type_; }
|
|
void set_nested_type(DebugAnnotation_NestedValue_NestedType value) { nested_type_ = value; _has_field_.set(1); }
|
|
|
|
int dict_keys_size() const { return static_cast<int>(dict_keys_.size()); }
|
|
const std::vector<std::string>& dict_keys() const { return dict_keys_; }
|
|
std::vector<std::string>* mutable_dict_keys() { return &dict_keys_; }
|
|
void clear_dict_keys() { dict_keys_.clear(); }
|
|
void add_dict_keys(std::string value) { dict_keys_.emplace_back(value); }
|
|
std::string* add_dict_keys() { dict_keys_.emplace_back(); return &dict_keys_.back(); }
|
|
|
|
int dict_values_size() const { return static_cast<int>(dict_values_.size()); }
|
|
const std::vector<DebugAnnotation_NestedValue>& dict_values() const { return dict_values_; }
|
|
std::vector<DebugAnnotation_NestedValue>* mutable_dict_values() { return &dict_values_; }
|
|
void clear_dict_values() { dict_values_.clear(); }
|
|
DebugAnnotation_NestedValue* add_dict_values() { dict_values_.emplace_back(); return &dict_values_.back(); }
|
|
|
|
int array_values_size() const { return static_cast<int>(array_values_.size()); }
|
|
const std::vector<DebugAnnotation_NestedValue>& array_values() const { return array_values_; }
|
|
std::vector<DebugAnnotation_NestedValue>* mutable_array_values() { return &array_values_; }
|
|
void clear_array_values() { array_values_.clear(); }
|
|
DebugAnnotation_NestedValue* add_array_values() { array_values_.emplace_back(); return &array_values_.back(); }
|
|
|
|
bool has_int_value() const { return _has_field_[5]; }
|
|
int64_t int_value() const { return int_value_; }
|
|
void set_int_value(int64_t value) { int_value_ = value; _has_field_.set(5); }
|
|
|
|
bool has_double_value() const { return _has_field_[6]; }
|
|
double double_value() const { return double_value_; }
|
|
void set_double_value(double value) { double_value_ = value; _has_field_.set(6); }
|
|
|
|
bool has_bool_value() const { return _has_field_[7]; }
|
|
bool bool_value() const { return bool_value_; }
|
|
void set_bool_value(bool value) { bool_value_ = value; _has_field_.set(7); }
|
|
|
|
bool has_string_value() const { return _has_field_[8]; }
|
|
const std::string& string_value() const { return string_value_; }
|
|
void set_string_value(const std::string& value) { string_value_ = value; _has_field_.set(8); }
|
|
|
|
private:
|
|
DebugAnnotation_NestedValue_NestedType nested_type_{};
|
|
std::vector<std::string> dict_keys_;
|
|
std::vector<DebugAnnotation_NestedValue> dict_values_;
|
|
std::vector<DebugAnnotation_NestedValue> array_values_;
|
|
int64_t int_value_{};
|
|
double double_value_{};
|
|
bool bool_value_{};
|
|
std::string string_value_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<9> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
DebugAnnotationName::DebugAnnotationName() = default;
|
|
DebugAnnotationName::~DebugAnnotationName() = default;
|
|
DebugAnnotationName::DebugAnnotationName(const DebugAnnotationName&) = default;
|
|
DebugAnnotationName& DebugAnnotationName::operator=(const DebugAnnotationName&) = default;
|
|
DebugAnnotationName::DebugAnnotationName(DebugAnnotationName&&) noexcept = default;
|
|
DebugAnnotationName& DebugAnnotationName::operator=(DebugAnnotationName&&) = default;
|
|
|
|
bool DebugAnnotationName::operator==(const DebugAnnotationName& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& iid_ == other.iid_
|
|
&& name_ == other.name_;
|
|
}
|
|
|
|
bool DebugAnnotationName::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* iid */:
|
|
field.get(&iid_);
|
|
break;
|
|
case 2 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DebugAnnotationName::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DebugAnnotationName::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DebugAnnotationName::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: iid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, iid_);
|
|
}
|
|
|
|
// Field 2: name
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, name_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
DebugAnnotation::DebugAnnotation() = default;
|
|
DebugAnnotation::~DebugAnnotation() = default;
|
|
DebugAnnotation::DebugAnnotation(const DebugAnnotation&) = default;
|
|
DebugAnnotation& DebugAnnotation::operator=(const DebugAnnotation&) = default;
|
|
DebugAnnotation::DebugAnnotation(DebugAnnotation&&) noexcept = default;
|
|
DebugAnnotation& DebugAnnotation::operator=(DebugAnnotation&&) = default;
|
|
|
|
bool DebugAnnotation::operator==(const DebugAnnotation& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_iid_ == other.name_iid_
|
|
&& name_ == other.name_
|
|
&& bool_value_ == other.bool_value_
|
|
&& uint_value_ == other.uint_value_
|
|
&& int_value_ == other.int_value_
|
|
&& double_value_ == other.double_value_
|
|
&& string_value_ == other.string_value_
|
|
&& pointer_value_ == other.pointer_value_
|
|
&& nested_value_ == other.nested_value_
|
|
&& legacy_json_value_ == other.legacy_json_value_;
|
|
}
|
|
|
|
bool DebugAnnotation::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name_iid */:
|
|
field.get(&name_iid_);
|
|
break;
|
|
case 10 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 2 /* bool_value */:
|
|
field.get(&bool_value_);
|
|
break;
|
|
case 3 /* uint_value */:
|
|
field.get(&uint_value_);
|
|
break;
|
|
case 4 /* int_value */:
|
|
field.get(&int_value_);
|
|
break;
|
|
case 5 /* double_value */:
|
|
field.get(&double_value_);
|
|
break;
|
|
case 6 /* string_value */:
|
|
field.get(&string_value_);
|
|
break;
|
|
case 7 /* pointer_value */:
|
|
field.get(&pointer_value_);
|
|
break;
|
|
case 8 /* nested_value */:
|
|
(*nested_value_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 9 /* legacy_json_value */:
|
|
field.get(&legacy_json_value_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DebugAnnotation::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DebugAnnotation::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DebugAnnotation::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name_iid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, name_iid_);
|
|
}
|
|
|
|
// Field 10: name
|
|
if (_has_field_[10]) {
|
|
msg->AppendString(10, name_);
|
|
}
|
|
|
|
// Field 2: bool_value
|
|
if (_has_field_[2]) {
|
|
msg->AppendTinyVarInt(2, bool_value_);
|
|
}
|
|
|
|
// Field 3: uint_value
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, uint_value_);
|
|
}
|
|
|
|
// Field 4: int_value
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, int_value_);
|
|
}
|
|
|
|
// Field 5: double_value
|
|
if (_has_field_[5]) {
|
|
msg->AppendFixed(5, double_value_);
|
|
}
|
|
|
|
// Field 6: string_value
|
|
if (_has_field_[6]) {
|
|
msg->AppendString(6, string_value_);
|
|
}
|
|
|
|
// Field 7: pointer_value
|
|
if (_has_field_[7]) {
|
|
msg->AppendVarInt(7, pointer_value_);
|
|
}
|
|
|
|
// Field 8: nested_value
|
|
if (_has_field_[8]) {
|
|
(*nested_value_).Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
|
|
}
|
|
|
|
// Field 9: legacy_json_value
|
|
if (_has_field_[9]) {
|
|
msg->AppendString(9, legacy_json_value_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
DebugAnnotation_NestedValue::DebugAnnotation_NestedValue() = default;
|
|
DebugAnnotation_NestedValue::~DebugAnnotation_NestedValue() = default;
|
|
DebugAnnotation_NestedValue::DebugAnnotation_NestedValue(const DebugAnnotation_NestedValue&) = default;
|
|
DebugAnnotation_NestedValue& DebugAnnotation_NestedValue::operator=(const DebugAnnotation_NestedValue&) = default;
|
|
DebugAnnotation_NestedValue::DebugAnnotation_NestedValue(DebugAnnotation_NestedValue&&) noexcept = default;
|
|
DebugAnnotation_NestedValue& DebugAnnotation_NestedValue::operator=(DebugAnnotation_NestedValue&&) = default;
|
|
|
|
bool DebugAnnotation_NestedValue::operator==(const DebugAnnotation_NestedValue& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& nested_type_ == other.nested_type_
|
|
&& dict_keys_ == other.dict_keys_
|
|
&& dict_values_ == other.dict_values_
|
|
&& array_values_ == other.array_values_
|
|
&& int_value_ == other.int_value_
|
|
&& double_value_ == other.double_value_
|
|
&& bool_value_ == other.bool_value_
|
|
&& string_value_ == other.string_value_;
|
|
}
|
|
|
|
bool DebugAnnotation_NestedValue::ParseFromArray(const void* raw, size_t size) {
|
|
dict_keys_.clear();
|
|
dict_values_.clear();
|
|
array_values_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* nested_type */:
|
|
field.get(&nested_type_);
|
|
break;
|
|
case 2 /* dict_keys */:
|
|
dict_keys_.emplace_back();
|
|
field.get(&dict_keys_.back());
|
|
break;
|
|
case 3 /* dict_values */:
|
|
dict_values_.emplace_back();
|
|
dict_values_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 4 /* array_values */:
|
|
array_values_.emplace_back();
|
|
array_values_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 5 /* int_value */:
|
|
field.get(&int_value_);
|
|
break;
|
|
case 6 /* double_value */:
|
|
field.get(&double_value_);
|
|
break;
|
|
case 7 /* bool_value */:
|
|
field.get(&bool_value_);
|
|
break;
|
|
case 8 /* string_value */:
|
|
field.get(&string_value_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DebugAnnotation_NestedValue::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DebugAnnotation_NestedValue::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DebugAnnotation_NestedValue::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: nested_type
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, nested_type_);
|
|
}
|
|
|
|
// Field 2: dict_keys
|
|
for (auto& it : dict_keys_) {
|
|
msg->AppendString(2, it);
|
|
}
|
|
|
|
// Field 3: dict_values
|
|
for (auto& it : dict_values_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
|
|
}
|
|
|
|
// Field 4: array_values
|
|
for (auto& it : array_values_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
|
|
}
|
|
|
|
// Field 5: int_value
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, int_value_);
|
|
}
|
|
|
|
// Field 6: double_value
|
|
if (_has_field_[6]) {
|
|
msg->AppendFixed(6, double_value_);
|
|
}
|
|
|
|
// Field 7: bool_value
|
|
if (_has_field_[7]) {
|
|
msg->AppendTinyVarInt(7, bool_value_);
|
|
}
|
|
|
|
// Field 8: string_value
|
|
if (_has_field_[8]) {
|
|
msg->AppendString(8, string_value_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
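
// Illustrative sketch (not part of the generated output): composing a
// DebugAnnotation whose payload is a nested dictionary, using the
// CopyablePtr-backed mutable_nested_value() accessor and the NestedValue
// helpers declared above (dict_keys and dict_values act as parallel arrays).
// Kept under #if 0 so it does not affect this translation unit.
#if 0
static void ExampleDebugAnnotationDict() {
  using perfetto::protos::gen::DebugAnnotation;
  DebugAnnotation annotation;
  annotation.set_name("frame_stats");  // field 10, string.

  DebugAnnotation::NestedValue* dict = annotation.mutable_nested_value();
  dict->set_nested_type(DebugAnnotation::NestedValue::DICT);
  dict->add_dict_keys("dropped_frames");
  dict->add_dict_values()->set_int_value(3);
  dict->add_dict_keys("janky");
  dict->add_dict_values()->set_bool_value(true);

  std::vector<uint8_t> bytes = annotation.SerializeAsArray();
  (void)bytes;
}
#endif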
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/log_message.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/log_message.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_LOG_MESSAGE_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_LOG_MESSAGE_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class LogMessageBody;
|
|
class LogMessage;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT LogMessageBody : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kIidFieldNumber = 1,
|
|
kBodyFieldNumber = 2,
|
|
};
|
|
|
|
LogMessageBody();
|
|
~LogMessageBody() override;
|
|
LogMessageBody(LogMessageBody&&) noexcept;
|
|
LogMessageBody& operator=(LogMessageBody&&);
|
|
LogMessageBody(const LogMessageBody&);
|
|
LogMessageBody& operator=(const LogMessageBody&);
|
|
bool operator==(const LogMessageBody&) const;
|
|
bool operator!=(const LogMessageBody& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_iid() const { return _has_field_[1]; }
|
|
uint64_t iid() const { return iid_; }
|
|
void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
|
|
|
|
bool has_body() const { return _has_field_[2]; }
|
|
const std::string& body() const { return body_; }
|
|
void set_body(const std::string& value) { body_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
uint64_t iid_{};
|
|
std::string body_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT LogMessage : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kSourceLocationIidFieldNumber = 1,
|
|
kBodyIidFieldNumber = 2,
|
|
};
|
|
|
|
LogMessage();
|
|
~LogMessage() override;
|
|
LogMessage(LogMessage&&) noexcept;
|
|
LogMessage& operator=(LogMessage&&);
|
|
LogMessage(const LogMessage&);
|
|
LogMessage& operator=(const LogMessage&);
|
|
bool operator==(const LogMessage&) const;
|
|
bool operator!=(const LogMessage& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_source_location_iid() const { return _has_field_[1]; }
|
|
uint64_t source_location_iid() const { return source_location_iid_; }
|
|
void set_source_location_iid(uint64_t value) { source_location_iid_ = value; _has_field_.set(1); }
|
|
|
|
bool has_body_iid() const { return _has_field_[2]; }
|
|
uint64_t body_iid() const { return body_iid_; }
|
|
void set_body_iid(uint64_t value) { body_iid_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
uint64_t source_location_iid_{};
|
|
uint64_t body_iid_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_LOG_MESSAGE_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/log_message.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
LogMessageBody::LogMessageBody() = default;
|
|
LogMessageBody::~LogMessageBody() = default;
|
|
LogMessageBody::LogMessageBody(const LogMessageBody&) = default;
|
|
LogMessageBody& LogMessageBody::operator=(const LogMessageBody&) = default;
|
|
LogMessageBody::LogMessageBody(LogMessageBody&&) noexcept = default;
|
|
LogMessageBody& LogMessageBody::operator=(LogMessageBody&&) = default;
|
|
|
|
bool LogMessageBody::operator==(const LogMessageBody& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& iid_ == other.iid_
|
|
&& body_ == other.body_;
|
|
}
|
|
|
|
bool LogMessageBody::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* iid */:
|
|
field.get(&iid_);
|
|
break;
|
|
case 2 /* body */:
|
|
field.get(&body_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string LogMessageBody::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> LogMessageBody::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void LogMessageBody::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: iid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, iid_);
|
|
}
|
|
|
|
// Field 2: body
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, body_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
LogMessage::LogMessage() = default;
|
|
LogMessage::~LogMessage() = default;
|
|
LogMessage::LogMessage(const LogMessage&) = default;
|
|
LogMessage& LogMessage::operator=(const LogMessage&) = default;
|
|
LogMessage::LogMessage(LogMessage&&) noexcept = default;
|
|
LogMessage& LogMessage::operator=(LogMessage&&) = default;
|
|
|
|
bool LogMessage::operator==(const LogMessage& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& source_location_iid_ == other.source_location_iid_
|
|
&& body_iid_ == other.body_iid_;
|
|
}
|
|
|
|
bool LogMessage::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* source_location_iid */:
|
|
field.get(&source_location_iid_);
|
|
break;
|
|
case 2 /* body_iid */:
|
|
field.get(&body_iid_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string LogMessage::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> LogMessage::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void LogMessage::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: source_location_iid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, source_location_iid_);
|
|
}
|
|
|
|
// Field 2: body_iid
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, body_iid_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
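
// Illustrative sketch (not part of the generated output): LogMessage refers to
// its body and source location through interning ids (iids), while the actual
// string lives in a separate LogMessageBody carrying the matching iid. Kept
// under #if 0 so it does not affect this translation unit.
#if 0
static void ExampleLogMessageInterning() {
  perfetto::protos::gen::LogMessageBody body;
  body.set_iid(42);                      // interning id for this body string.
  body.set_body("connection timed out");

  perfetto::protos::gen::LogMessage msg;
  msg.set_body_iid(42);            // refers to LogMessageBody.iid above.
  msg.set_source_location_iid(7);  // id of a separately interned source location.

  std::vector<uint8_t> body_bytes = body.SerializeAsArray();
  std::vector<uint8_t> msg_bytes = msg.SerializeAsArray();
  (void)body_bytes;
  (void)msg_bytes;
}
#endif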
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/process_descriptor.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/process_descriptor.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ProcessDescriptor;
|
|
enum ProcessDescriptor_ChromeProcessType : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum ProcessDescriptor_ChromeProcessType : int {
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED = 0,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER = 1,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER = 2,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY = 3,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE = 4,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER = 5,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_GPU = 6,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN = 7,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER = 8,
|
|
};
|
|
|
|
class PERFETTO_EXPORT ProcessDescriptor : public ::protozero::CppMessageObj {
|
|
public:
|
|
using ChromeProcessType = ProcessDescriptor_ChromeProcessType;
|
|
static constexpr auto PROCESS_UNSPECIFIED = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
|
|
static constexpr auto PROCESS_BROWSER = ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER;
|
|
static constexpr auto PROCESS_RENDERER = ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER;
|
|
static constexpr auto PROCESS_UTILITY = ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY;
|
|
static constexpr auto PROCESS_ZYGOTE = ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE;
|
|
static constexpr auto PROCESS_SANDBOX_HELPER = ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER;
|
|
static constexpr auto PROCESS_GPU = ProcessDescriptor_ChromeProcessType_PROCESS_GPU;
|
|
static constexpr auto PROCESS_PPAPI_PLUGIN = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN;
|
|
static constexpr auto PROCESS_PPAPI_BROKER = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
|
|
static constexpr auto ChromeProcessType_MIN = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
|
|
static constexpr auto ChromeProcessType_MAX = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
|
|
enum FieldNumbers {
|
|
kPidFieldNumber = 1,
|
|
kCmdlineFieldNumber = 2,
|
|
kProcessNameFieldNumber = 6,
|
|
kProcessPriorityFieldNumber = 5,
|
|
kChromeProcessTypeFieldNumber = 4,
|
|
kLegacySortIndexFieldNumber = 3,
|
|
};
|
|
|
|
ProcessDescriptor();
|
|
~ProcessDescriptor() override;
|
|
ProcessDescriptor(ProcessDescriptor&&) noexcept;
|
|
ProcessDescriptor& operator=(ProcessDescriptor&&);
|
|
ProcessDescriptor(const ProcessDescriptor&);
|
|
ProcessDescriptor& operator=(const ProcessDescriptor&);
|
|
bool operator==(const ProcessDescriptor&) const;
|
|
bool operator!=(const ProcessDescriptor& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_pid() const { return _has_field_[1]; }
|
|
int32_t pid() const { return pid_; }
|
|
void set_pid(int32_t value) { pid_ = value; _has_field_.set(1); }
|
|
|
|
int cmdline_size() const { return static_cast<int>(cmdline_.size()); }
|
|
const std::vector<std::string>& cmdline() const { return cmdline_; }
|
|
std::vector<std::string>* mutable_cmdline() { return &cmdline_; }
|
|
void clear_cmdline() { cmdline_.clear(); }
|
|
void add_cmdline(std::string value) { cmdline_.emplace_back(value); }
|
|
std::string* add_cmdline() { cmdline_.emplace_back(); return &cmdline_.back(); }
|
|
|
|
bool has_process_name() const { return _has_field_[6]; }
|
|
const std::string& process_name() const { return process_name_; }
|
|
void set_process_name(const std::string& value) { process_name_ = value; _has_field_.set(6); }
|
|
|
|
bool has_process_priority() const { return _has_field_[5]; }
|
|
int32_t process_priority() const { return process_priority_; }
|
|
void set_process_priority(int32_t value) { process_priority_ = value; _has_field_.set(5); }
|
|
|
|
bool has_chrome_process_type() const { return _has_field_[4]; }
|
|
ProcessDescriptor_ChromeProcessType chrome_process_type() const { return chrome_process_type_; }
|
|
void set_chrome_process_type(ProcessDescriptor_ChromeProcessType value) { chrome_process_type_ = value; _has_field_.set(4); }
|
|
|
|
bool has_legacy_sort_index() const { return _has_field_[3]; }
|
|
int32_t legacy_sort_index() const { return legacy_sort_index_; }
|
|
void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(3); }
|
|
|
|
private:
|
|
int32_t pid_{};
|
|
std::vector<std::string> cmdline_;
|
|
std::string process_name_{};
|
|
int32_t process_priority_{};
|
|
ProcessDescriptor_ChromeProcessType chrome_process_type_{};
|
|
int32_t legacy_sort_index_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility with
// future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<7> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_CPP_H_
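// Illustrative sketch (editor-added, not generated): how the ProcessDescriptor
// accessors declared above are typically combined. Every call below appears in
// the class definition in this header; only the surrounding snippet is
// hypothetical.
//
//   perfetto::protos::gen::ProcessDescriptor desc;
//   desc.set_pid(1234);
//   desc.add_cmdline("my_app");
//   desc.set_chrome_process_type(
//       perfetto::protos::gen::ProcessDescriptor::PROCESS_RENDERER);
//   std::string bytes = desc.SerializeAsString();
//
//   perfetto::protos::gen::ProcessDescriptor parsed;
//   bool ok = parsed.ParseFromArray(bytes.data(), bytes.size());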
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ProcessDescriptor::ProcessDescriptor() = default;
|
|
ProcessDescriptor::~ProcessDescriptor() = default;
|
|
ProcessDescriptor::ProcessDescriptor(const ProcessDescriptor&) = default;
|
|
ProcessDescriptor& ProcessDescriptor::operator=(const ProcessDescriptor&) = default;
|
|
ProcessDescriptor::ProcessDescriptor(ProcessDescriptor&&) noexcept = default;
|
|
ProcessDescriptor& ProcessDescriptor::operator=(ProcessDescriptor&&) = default;
|
|
|
|
bool ProcessDescriptor::operator==(const ProcessDescriptor& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& pid_ == other.pid_
|
|
&& cmdline_ == other.cmdline_
|
|
&& process_name_ == other.process_name_
|
|
&& process_priority_ == other.process_priority_
|
|
&& chrome_process_type_ == other.chrome_process_type_
|
|
&& legacy_sort_index_ == other.legacy_sort_index_;
|
|
}
|
|
|
|
bool ProcessDescriptor::ParseFromArray(const void* raw, size_t size) {
|
|
cmdline_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* pid */:
|
|
field.get(&pid_);
|
|
break;
|
|
case 2 /* cmdline */:
|
|
cmdline_.emplace_back();
|
|
field.get(&cmdline_.back());
|
|
break;
|
|
case 6 /* process_name */:
|
|
field.get(&process_name_);
|
|
break;
|
|
case 5 /* process_priority */:
|
|
field.get(&process_priority_);
|
|
break;
|
|
case 4 /* chrome_process_type */:
|
|
field.get(&chrome_process_type_);
|
|
break;
|
|
case 3 /* legacy_sort_index */:
|
|
field.get(&legacy_sort_index_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ProcessDescriptor::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ProcessDescriptor::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ProcessDescriptor::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: pid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, pid_);
|
|
}
|
|
|
|
// Field 2: cmdline
|
|
for (auto& it : cmdline_) {
|
|
msg->AppendString(2, it);
|
|
}
|
|
|
|
// Field 6: process_name
|
|
if (_has_field_[6]) {
|
|
msg->AppendString(6, process_name_);
|
|
}
|
|
|
|
// Field 5: process_priority
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, process_priority_);
|
|
}
|
|
|
|
// Field 4: chrome_process_type
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, chrome_process_type_);
|
|
}
|
|
|
|
// Field 3: legacy_sort_index
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, legacy_sort_index_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/source_location.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/source_location.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
SourceLocation::SourceLocation() = default;
|
|
SourceLocation::~SourceLocation() = default;
|
|
SourceLocation::SourceLocation(const SourceLocation&) = default;
|
|
SourceLocation& SourceLocation::operator=(const SourceLocation&) = default;
|
|
SourceLocation::SourceLocation(SourceLocation&&) noexcept = default;
|
|
SourceLocation& SourceLocation::operator=(SourceLocation&&) = default;
|
|
|
|
bool SourceLocation::operator==(const SourceLocation& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& iid_ == other.iid_
|
|
&& file_name_ == other.file_name_
|
|
&& function_name_ == other.function_name_
|
|
&& line_number_ == other.line_number_;
|
|
}
|
|
|
|
bool SourceLocation::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* iid */:
|
|
field.get(&iid_);
|
|
break;
|
|
case 2 /* file_name */:
|
|
field.get(&file_name_);
|
|
break;
|
|
case 3 /* function_name */:
|
|
field.get(&function_name_);
|
|
break;
|
|
case 4 /* line_number */:
|
|
field.get(&line_number_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string SourceLocation::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> SourceLocation::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void SourceLocation::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: iid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, iid_);
|
|
}
|
|
|
|
// Field 2: file_name
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, file_name_);
|
|
}
|
|
|
|
// Field 3: function_name
|
|
if (_has_field_[3]) {
|
|
msg->AppendString(3, function_name_);
|
|
}
|
|
|
|
// Field 4: line_number
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, line_number_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
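// Editorial note (not part of the generated file): SourceLocation.iid (field 1)
// is the interning key that LogMessage.source_location_iid refers back to, so a
// trace writer typically emits each SourceLocation once and then references it
// by iid from every LogMessage. Hypothetical sketch; the setters follow the
// generator's accessor pattern and are declared in the SourceLocation .gen.h,
// which is not shown in this section:
//
//   perfetto::protos::gen::SourceLocation loc;
//   loc.set_iid(7);
//   loc.set_file_name("foo.cc");
//   loc.set_line_number(42);
//   // ... later, in a LogMessage: log.set_source_location_iid(7);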
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/task_execution.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/task_execution.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TASK_EXECUTION_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TASK_EXECUTION_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class TaskExecution;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT TaskExecution : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kPostedFromIidFieldNumber = 1,
|
|
};
|
|
|
|
TaskExecution();
|
|
~TaskExecution() override;
|
|
TaskExecution(TaskExecution&&) noexcept;
|
|
TaskExecution& operator=(TaskExecution&&);
|
|
TaskExecution(const TaskExecution&);
|
|
TaskExecution& operator=(const TaskExecution&);
|
|
bool operator==(const TaskExecution&) const;
|
|
bool operator!=(const TaskExecution& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_posted_from_iid() const { return _has_field_[1]; }
|
|
uint64_t posted_from_iid() const { return posted_from_iid_; }
|
|
void set_posted_from_iid(uint64_t value) { posted_from_iid_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
uint64_t posted_from_iid_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility with
// future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TASK_EXECUTION_PROTO_CPP_H_
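// Illustrative sketch (editor-added): TaskExecution carries a single interned
// id, posted_from_iid, which is understood to reference a SourceLocation
// describing where the task was posted from. Using only the accessors declared
// above:
//
//   perfetto::protos::gen::TaskExecution task;
//   task.set_posted_from_iid(7);
//   bool present = task.has_posted_from_iid();  // true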
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/task_execution.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
TaskExecution::TaskExecution() = default;
|
|
TaskExecution::~TaskExecution() = default;
|
|
TaskExecution::TaskExecution(const TaskExecution&) = default;
|
|
TaskExecution& TaskExecution::operator=(const TaskExecution&) = default;
|
|
TaskExecution::TaskExecution(TaskExecution&&) noexcept = default;
|
|
TaskExecution& TaskExecution::operator=(TaskExecution&&) = default;
|
|
|
|
bool TaskExecution::operator==(const TaskExecution& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& posted_from_iid_ == other.posted_from_iid_;
|
|
}
|
|
|
|
bool TaskExecution::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* posted_from_iid */:
|
|
field.get(&posted_from_iid_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TaskExecution::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TaskExecution::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TaskExecution::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: posted_from_iid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, posted_from_iid_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/thread_descriptor.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/thread_descriptor.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class ThreadDescriptor;
|
|
enum ThreadDescriptor_ChromeThreadType : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum ThreadDescriptor_ChromeThreadType : int {
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED = 0,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN = 1,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO = 2,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER = 3,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER = 4,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING = 5,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING = 6,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE = 7,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR = 8,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR = 9,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER = 10,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER = 11,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA = 50,
  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER = 51,
};
|
|
|
|
class PERFETTO_EXPORT ThreadDescriptor : public ::protozero::CppMessageObj {
|
|
public:
|
|
using ChromeThreadType = ThreadDescriptor_ChromeThreadType;
|
|
static constexpr auto CHROME_THREAD_UNSPECIFIED = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
|
|
static constexpr auto CHROME_THREAD_MAIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN;
|
|
static constexpr auto CHROME_THREAD_IO = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO;
|
|
static constexpr auto CHROME_THREAD_POOL_BG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER;
|
|
static constexpr auto CHROME_THREAD_POOL_FG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER;
|
|
static constexpr auto CHROME_THREAD_POOL_FB_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING;
|
|
static constexpr auto CHROME_THREAD_POOL_BG_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING;
|
|
static constexpr auto CHROME_THREAD_POOL_SERVICE = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE;
|
|
static constexpr auto CHROME_THREAD_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR;
|
|
static constexpr auto CHROME_THREAD_VIZ_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR;
|
|
static constexpr auto CHROME_THREAD_COMPOSITOR_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER;
|
|
static constexpr auto CHROME_THREAD_SERVICE_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER;
|
|
static constexpr auto CHROME_THREAD_MEMORY_INFRA = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA;
|
|
static constexpr auto CHROME_THREAD_SAMPLING_PROFILER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
|
|
static constexpr auto ChromeThreadType_MIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
|
|
static constexpr auto ChromeThreadType_MAX = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
|
|
enum FieldNumbers {
|
|
kPidFieldNumber = 1,
|
|
kTidFieldNumber = 2,
|
|
kThreadNameFieldNumber = 5,
|
|
kChromeThreadTypeFieldNumber = 4,
|
|
kReferenceTimestampUsFieldNumber = 6,
|
|
kReferenceThreadTimeUsFieldNumber = 7,
|
|
kReferenceThreadInstructionCountFieldNumber = 8,
|
|
kLegacySortIndexFieldNumber = 3,
|
|
};
|
|
|
|
ThreadDescriptor();
|
|
~ThreadDescriptor() override;
|
|
ThreadDescriptor(ThreadDescriptor&&) noexcept;
|
|
ThreadDescriptor& operator=(ThreadDescriptor&&);
|
|
ThreadDescriptor(const ThreadDescriptor&);
|
|
ThreadDescriptor& operator=(const ThreadDescriptor&);
|
|
bool operator==(const ThreadDescriptor&) const;
|
|
bool operator!=(const ThreadDescriptor& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_pid() const { return _has_field_[1]; }
|
|
int32_t pid() const { return pid_; }
|
|
void set_pid(int32_t value) { pid_ = value; _has_field_.set(1); }
|
|
|
|
bool has_tid() const { return _has_field_[2]; }
|
|
int32_t tid() const { return tid_; }
|
|
void set_tid(int32_t value) { tid_ = value; _has_field_.set(2); }
|
|
|
|
bool has_thread_name() const { return _has_field_[5]; }
|
|
const std::string& thread_name() const { return thread_name_; }
|
|
void set_thread_name(const std::string& value) { thread_name_ = value; _has_field_.set(5); }
|
|
|
|
bool has_chrome_thread_type() const { return _has_field_[4]; }
|
|
ThreadDescriptor_ChromeThreadType chrome_thread_type() const { return chrome_thread_type_; }
|
|
void set_chrome_thread_type(ThreadDescriptor_ChromeThreadType value) { chrome_thread_type_ = value; _has_field_.set(4); }
|
|
|
|
bool has_reference_timestamp_us() const { return _has_field_[6]; }
|
|
int64_t reference_timestamp_us() const { return reference_timestamp_us_; }
|
|
void set_reference_timestamp_us(int64_t value) { reference_timestamp_us_ = value; _has_field_.set(6); }
|
|
|
|
bool has_reference_thread_time_us() const { return _has_field_[7]; }
|
|
int64_t reference_thread_time_us() const { return reference_thread_time_us_; }
|
|
void set_reference_thread_time_us(int64_t value) { reference_thread_time_us_ = value; _has_field_.set(7); }
|
|
|
|
bool has_reference_thread_instruction_count() const { return _has_field_[8]; }
|
|
int64_t reference_thread_instruction_count() const { return reference_thread_instruction_count_; }
|
|
void set_reference_thread_instruction_count(int64_t value) { reference_thread_instruction_count_ = value; _has_field_.set(8); }
|
|
|
|
bool has_legacy_sort_index() const { return _has_field_[3]; }
|
|
int32_t legacy_sort_index() const { return legacy_sort_index_; }
|
|
void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(3); }
|
|
|
|
private:
|
|
int32_t pid_{};
|
|
int32_t tid_{};
|
|
std::string thread_name_{};
|
|
ThreadDescriptor_ChromeThreadType chrome_thread_type_{};
|
|
int64_t reference_timestamp_us_{};
|
|
int64_t reference_thread_time_us_{};
|
|
int64_t reference_thread_instruction_count_{};
|
|
int32_t legacy_sort_index_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility with
// future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<9> _has_field_{};
|
|
};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_CPP_H_
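// Illustrative sketch (editor-added): populating a ThreadDescriptor with the
// accessors declared above. The reference_* fields are understood to establish
// the baseline against which delta-encoded TrackEvent timestamps are resolved.
//
//   perfetto::protos::gen::ThreadDescriptor td;
//   td.set_pid(1234);
//   td.set_tid(5678);
//   td.set_thread_name("MainThread");
//   td.set_chrome_thread_type(
//       perfetto::protos::gen::ThreadDescriptor::CHROME_THREAD_MAIN);
//   td.set_reference_timestamp_us(1000000);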
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ThreadDescriptor::ThreadDescriptor() = default;
|
|
ThreadDescriptor::~ThreadDescriptor() = default;
|
|
ThreadDescriptor::ThreadDescriptor(const ThreadDescriptor&) = default;
|
|
ThreadDescriptor& ThreadDescriptor::operator=(const ThreadDescriptor&) = default;
|
|
ThreadDescriptor::ThreadDescriptor(ThreadDescriptor&&) noexcept = default;
|
|
ThreadDescriptor& ThreadDescriptor::operator=(ThreadDescriptor&&) = default;
|
|
|
|
bool ThreadDescriptor::operator==(const ThreadDescriptor& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& pid_ == other.pid_
|
|
&& tid_ == other.tid_
|
|
&& thread_name_ == other.thread_name_
|
|
&& chrome_thread_type_ == other.chrome_thread_type_
|
|
&& reference_timestamp_us_ == other.reference_timestamp_us_
|
|
&& reference_thread_time_us_ == other.reference_thread_time_us_
|
|
&& reference_thread_instruction_count_ == other.reference_thread_instruction_count_
|
|
&& legacy_sort_index_ == other.legacy_sort_index_;
|
|
}
|
|
|
|
bool ThreadDescriptor::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* pid */:
|
|
field.get(&pid_);
|
|
break;
|
|
case 2 /* tid */:
|
|
field.get(&tid_);
|
|
break;
|
|
case 5 /* thread_name */:
|
|
field.get(&thread_name_);
|
|
break;
|
|
case 4 /* chrome_thread_type */:
|
|
field.get(&chrome_thread_type_);
|
|
break;
|
|
case 6 /* reference_timestamp_us */:
|
|
field.get(&reference_timestamp_us_);
|
|
break;
|
|
case 7 /* reference_thread_time_us */:
|
|
field.get(&reference_thread_time_us_);
|
|
break;
|
|
case 8 /* reference_thread_instruction_count */:
|
|
field.get(&reference_thread_instruction_count_);
|
|
break;
|
|
case 3 /* legacy_sort_index */:
|
|
field.get(&legacy_sort_index_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ThreadDescriptor::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ThreadDescriptor::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ThreadDescriptor::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: pid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, pid_);
|
|
}
|
|
|
|
// Field 2: tid
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, tid_);
|
|
}
|
|
|
|
// Field 5: thread_name
|
|
if (_has_field_[5]) {
|
|
msg->AppendString(5, thread_name_);
|
|
}
|
|
|
|
// Field 4: chrome_thread_type
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, chrome_thread_type_);
|
|
}
|
|
|
|
// Field 6: reference_timestamp_us
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, reference_timestamp_us_);
|
|
}
|
|
|
|
// Field 7: reference_thread_time_us
|
|
if (_has_field_[7]) {
|
|
msg->AppendVarInt(7, reference_thread_time_us_);
|
|
}
|
|
|
|
// Field 8: reference_thread_instruction_count
|
|
if (_has_field_[8]) {
|
|
msg->AppendVarInt(8, reference_thread_instruction_count_);
|
|
}
|
|
|
|
// Field 3: legacy_sort_index
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, legacy_sort_index_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/counter_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_process_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
TrackDescriptor::TrackDescriptor() = default;
|
|
TrackDescriptor::~TrackDescriptor() = default;
|
|
TrackDescriptor::TrackDescriptor(const TrackDescriptor&) = default;
|
|
TrackDescriptor& TrackDescriptor::operator=(const TrackDescriptor&) = default;
|
|
TrackDescriptor::TrackDescriptor(TrackDescriptor&&) noexcept = default;
|
|
TrackDescriptor& TrackDescriptor::operator=(TrackDescriptor&&) = default;
|
|
|
|
bool TrackDescriptor::operator==(const TrackDescriptor& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& uuid_ == other.uuid_
|
|
&& parent_uuid_ == other.parent_uuid_
|
|
&& name_ == other.name_
|
|
&& process_ == other.process_
|
|
&& chrome_process_ == other.chrome_process_
|
|
&& thread_ == other.thread_
|
|
&& chrome_thread_ == other.chrome_thread_
|
|
&& counter_ == other.counter_;
|
|
}
|
|
|
|
bool TrackDescriptor::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* uuid */:
|
|
field.get(&uuid_);
|
|
break;
|
|
case 5 /* parent_uuid */:
|
|
field.get(&parent_uuid_);
|
|
break;
|
|
case 2 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 3 /* process */:
|
|
(*process_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 6 /* chrome_process */:
|
|
(*chrome_process_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 4 /* thread */:
|
|
(*thread_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 7 /* chrome_thread */:
|
|
(*chrome_thread_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 8 /* counter */:
|
|
(*counter_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TrackDescriptor::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TrackDescriptor::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TrackDescriptor::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: uuid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, uuid_);
|
|
}
|
|
|
|
// Field 5: parent_uuid
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, parent_uuid_);
|
|
}
|
|
|
|
// Field 2: name
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, name_);
|
|
}
|
|
|
|
// Field 3: process
|
|
if (_has_field_[3]) {
|
|
(*process_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
|
|
}
|
|
|
|
// Field 6: chrome_process
|
|
if (_has_field_[6]) {
|
|
(*chrome_process_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
// Field 4: thread
|
|
if (_has_field_[4]) {
|
|
(*thread_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
|
|
}
|
|
|
|
// Field 7: chrome_thread
|
|
if (_has_field_[7]) {
|
|
(*chrome_thread_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
|
|
}
|
|
|
|
// Field 8: counter
|
|
if (_has_field_[8]) {
|
|
(*counter_).Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#pragma GCC diagnostic pop
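// Editorial note (not part of the generated file): TrackDescriptor above shows
// how cppgen handles nested message fields. On parse, each sub-message payload
// is handed to the child's ParseFromString(); on serialize, the child is
// written through msg->BeginNestedMessage<::protozero::Message>(field_id), so
// the length-prefixed framing is handled by protozero rather than by this
// class.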
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_event.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/track_event.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class EventName;
|
|
class EventCategory;
|
|
class TrackEventDefaults;
|
|
class TrackEvent;
|
|
class TrackEvent_LegacyEvent;
|
|
class ChromeFrameReporter;
|
|
class ChromeLatencyInfo;
|
|
class ChromeLatencyInfo_ComponentInfo;
|
|
class ChromeHistogramSample;
|
|
class ChromeLegacyIpc;
|
|
class ChromeKeyedService;
|
|
class ChromeUserEvent;
|
|
class ChromeCompositorSchedulerState;
|
|
class CompositorTimingHistory;
|
|
class BeginFrameSourceState;
|
|
class BeginFrameArgs;
|
|
class SourceLocation;
|
|
class BeginFrameObserverState;
|
|
class BeginImplFrameArgs;
|
|
class BeginImplFrameArgs_TimestampsInUs;
|
|
class ChromeCompositorStateMachine;
|
|
class ChromeCompositorStateMachine_MinorState;
|
|
class ChromeCompositorStateMachine_MajorState;
|
|
class LogMessage;
|
|
class TaskExecution;
|
|
class DebugAnnotation;
|
|
class DebugAnnotation_NestedValue;
|
|
enum TrackEvent_Type : int;
|
|
enum TrackEvent_LegacyEvent_FlowDirection : int;
|
|
enum TrackEvent_LegacyEvent_InstantEventScope : int;
|
|
enum ChromeFrameReporter_State : int;
|
|
enum ChromeFrameReporter_FrameDropReason : int;
|
|
enum ChromeLatencyInfo_Step : int;
|
|
enum ChromeLatencyInfo_LatencyComponentType : int;
|
|
enum ChromeLegacyIpc_MessageClass : int;
|
|
enum ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode : int;
|
|
enum ChromeCompositorSchedulerAction : int;
|
|
enum BeginFrameArgs_BeginFrameArgsType : int;
|
|
enum BeginImplFrameArgs_State : int;
|
|
enum ChromeCompositorStateMachine_MinorState_TreePriority : int;
|
|
enum ChromeCompositorStateMachine_MinorState_ScrollHandlerState : int;
|
|
enum ChromeCompositorStateMachine_MajorState_BeginImplFrameState : int;
|
|
enum ChromeCompositorStateMachine_MajorState_BeginMainFrameState : int;
|
|
enum ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState : int;
|
|
enum ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState : int;
|
|
enum DebugAnnotation_NestedValue_NestedType : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
enum TrackEvent_Type : int {
  TrackEvent_Type_TYPE_UNSPECIFIED = 0,
  TrackEvent_Type_TYPE_SLICE_BEGIN = 1,
  TrackEvent_Type_TYPE_SLICE_END = 2,
  TrackEvent_Type_TYPE_INSTANT = 3,
  TrackEvent_Type_TYPE_COUNTER = 4,
};

enum TrackEvent_LegacyEvent_FlowDirection : int {
  TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED = 0,
  TrackEvent_LegacyEvent_FlowDirection_FLOW_IN = 1,
  TrackEvent_LegacyEvent_FlowDirection_FLOW_OUT = 2,
  TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT = 3,
};

enum TrackEvent_LegacyEvent_InstantEventScope : int {
  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED = 0,
  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_GLOBAL = 1,
  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_PROCESS = 2,
  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD = 3,
};
|
|
|
|
class PERFETTO_EXPORT EventName : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kIidFieldNumber = 1,
|
|
kNameFieldNumber = 2,
|
|
};
|
|
|
|
EventName();
|
|
~EventName() override;
|
|
EventName(EventName&&) noexcept;
|
|
EventName& operator=(EventName&&);
|
|
EventName(const EventName&);
|
|
EventName& operator=(const EventName&);
|
|
bool operator==(const EventName&) const;
|
|
bool operator!=(const EventName& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_iid() const { return _has_field_[1]; }
|
|
uint64_t iid() const { return iid_; }
|
|
void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
|
|
|
|
bool has_name() const { return _has_field_[2]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
uint64_t iid_{};
|
|
std::string name_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility with
// future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT EventCategory : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kIidFieldNumber = 1,
|
|
kNameFieldNumber = 2,
|
|
};
|
|
|
|
EventCategory();
|
|
~EventCategory() override;
|
|
EventCategory(EventCategory&&) noexcept;
|
|
EventCategory& operator=(EventCategory&&);
|
|
EventCategory(const EventCategory&);
|
|
EventCategory& operator=(const EventCategory&);
|
|
bool operator==(const EventCategory&) const;
|
|
bool operator!=(const EventCategory& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_iid() const { return _has_field_[1]; }
|
|
uint64_t iid() const { return iid_; }
|
|
void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
|
|
|
|
bool has_name() const { return _has_field_[2]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
uint64_t iid_{};
|
|
std::string name_{};
|
|
|
|
// Preserves unknown protobuf fields for compatibility with
// future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT TrackEventDefaults : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kTrackUuidFieldNumber = 11,
|
|
kExtraCounterTrackUuidsFieldNumber = 31,
|
|
};
|
|
|
|
TrackEventDefaults();
|
|
~TrackEventDefaults() override;
|
|
TrackEventDefaults(TrackEventDefaults&&) noexcept;
|
|
TrackEventDefaults& operator=(TrackEventDefaults&&);
|
|
TrackEventDefaults(const TrackEventDefaults&);
|
|
TrackEventDefaults& operator=(const TrackEventDefaults&);
|
|
bool operator==(const TrackEventDefaults&) const;
|
|
bool operator!=(const TrackEventDefaults& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_track_uuid() const { return _has_field_[11]; }
|
|
uint64_t track_uuid() const { return track_uuid_; }
|
|
void set_track_uuid(uint64_t value) { track_uuid_ = value; _has_field_.set(11); }
|
|
|
|
int extra_counter_track_uuids_size() const { return static_cast<int>(extra_counter_track_uuids_.size()); }
|
|
const std::vector<uint64_t>& extra_counter_track_uuids() const { return extra_counter_track_uuids_; }
|
|
std::vector<uint64_t>* mutable_extra_counter_track_uuids() { return &extra_counter_track_uuids_; }
|
|
void clear_extra_counter_track_uuids() { extra_counter_track_uuids_.clear(); }
|
|
void add_extra_counter_track_uuids(uint64_t value) { extra_counter_track_uuids_.emplace_back(value); }
|
|
uint64_t* add_extra_counter_track_uuids() { extra_counter_track_uuids_.emplace_back(); return &extra_counter_track_uuids_.back(); }
|
|
|
|
private:
|
|
uint64_t track_uuid_{};
|
|
std::vector<uint64_t> extra_counter_track_uuids_;
|
|
|
|
// Preserves unknown protobuf fields for compatibility with
// future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<32> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT TrackEvent : public ::protozero::CppMessageObj {
|
|
public:
|
|
using LegacyEvent = TrackEvent_LegacyEvent;
|
|
using Type = TrackEvent_Type;
|
|
static constexpr auto TYPE_UNSPECIFIED = TrackEvent_Type_TYPE_UNSPECIFIED;
|
|
static constexpr auto TYPE_SLICE_BEGIN = TrackEvent_Type_TYPE_SLICE_BEGIN;
|
|
static constexpr auto TYPE_SLICE_END = TrackEvent_Type_TYPE_SLICE_END;
|
|
static constexpr auto TYPE_INSTANT = TrackEvent_Type_TYPE_INSTANT;
|
|
static constexpr auto TYPE_COUNTER = TrackEvent_Type_TYPE_COUNTER;
|
|
static constexpr auto Type_MIN = TrackEvent_Type_TYPE_UNSPECIFIED;
|
|
static constexpr auto Type_MAX = TrackEvent_Type_TYPE_COUNTER;
|
|
enum FieldNumbers {
|
|
kCategoryIidsFieldNumber = 3,
|
|
kCategoriesFieldNumber = 22,
|
|
kNameIidFieldNumber = 10,
|
|
kNameFieldNumber = 23,
|
|
kTypeFieldNumber = 9,
|
|
kTrackUuidFieldNumber = 11,
|
|
kCounterValueFieldNumber = 30,
|
|
kExtraCounterTrackUuidsFieldNumber = 31,
|
|
kExtraCounterValuesFieldNumber = 12,
|
|
kDebugAnnotationsFieldNumber = 4,
|
|
kTaskExecutionFieldNumber = 5,
|
|
kLogMessageFieldNumber = 21,
|
|
kCcSchedulerStateFieldNumber = 24,
|
|
kChromeUserEventFieldNumber = 25,
|
|
kChromeKeyedServiceFieldNumber = 26,
|
|
kChromeLegacyIpcFieldNumber = 27,
|
|
kChromeHistogramSampleFieldNumber = 28,
|
|
kChromeLatencyInfoFieldNumber = 29,
|
|
kChromeFrameReporterFieldNumber = 32,
|
|
kTimestampDeltaUsFieldNumber = 1,
|
|
kTimestampAbsoluteUsFieldNumber = 16,
|
|
kThreadTimeDeltaUsFieldNumber = 2,
|
|
kThreadTimeAbsoluteUsFieldNumber = 17,
|
|
kThreadInstructionCountDeltaFieldNumber = 8,
|
|
kThreadInstructionCountAbsoluteFieldNumber = 20,
|
|
kLegacyEventFieldNumber = 6,
|
|
};
|
|
|
|
TrackEvent();
|
|
~TrackEvent() override;
|
|
TrackEvent(TrackEvent&&) noexcept;
|
|
TrackEvent& operator=(TrackEvent&&);
|
|
TrackEvent(const TrackEvent&);
|
|
TrackEvent& operator=(const TrackEvent&);
|
|
bool operator==(const TrackEvent&) const;
|
|
bool operator!=(const TrackEvent& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int category_iids_size() const { return static_cast<int>(category_iids_.size()); }
|
|
const std::vector<uint64_t>& category_iids() const { return category_iids_; }
|
|
std::vector<uint64_t>* mutable_category_iids() { return &category_iids_; }
|
|
void clear_category_iids() { category_iids_.clear(); }
|
|
void add_category_iids(uint64_t value) { category_iids_.emplace_back(value); }
|
|
uint64_t* add_category_iids() { category_iids_.emplace_back(); return &category_iids_.back(); }
|
|
|
|
int categories_size() const { return static_cast<int>(categories_.size()); }
|
|
const std::vector<std::string>& categories() const { return categories_; }
|
|
std::vector<std::string>* mutable_categories() { return &categories_; }
|
|
void clear_categories() { categories_.clear(); }
|
|
void add_categories(std::string value) { categories_.emplace_back(value); }
|
|
std::string* add_categories() { categories_.emplace_back(); return &categories_.back(); }
|
|
|
|
bool has_name_iid() const { return _has_field_[10]; }
|
|
uint64_t name_iid() const { return name_iid_; }
|
|
void set_name_iid(uint64_t value) { name_iid_ = value; _has_field_.set(10); }
|
|
|
|
bool has_name() const { return _has_field_[23]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(23); }
|
|
|
|
bool has_type() const { return _has_field_[9]; }
|
|
TrackEvent_Type type() const { return type_; }
|
|
void set_type(TrackEvent_Type value) { type_ = value; _has_field_.set(9); }
|
|
|
|
bool has_track_uuid() const { return _has_field_[11]; }
|
|
uint64_t track_uuid() const { return track_uuid_; }
|
|
void set_track_uuid(uint64_t value) { track_uuid_ = value; _has_field_.set(11); }
|
|
|
|
bool has_counter_value() const { return _has_field_[30]; }
|
|
int64_t counter_value() const { return counter_value_; }
|
|
void set_counter_value(int64_t value) { counter_value_ = value; _has_field_.set(30); }
|
|
|
|
int extra_counter_track_uuids_size() const { return static_cast<int>(extra_counter_track_uuids_.size()); }
|
|
const std::vector<uint64_t>& extra_counter_track_uuids() const { return extra_counter_track_uuids_; }
|
|
std::vector<uint64_t>* mutable_extra_counter_track_uuids() { return &extra_counter_track_uuids_; }
|
|
void clear_extra_counter_track_uuids() { extra_counter_track_uuids_.clear(); }
|
|
void add_extra_counter_track_uuids(uint64_t value) { extra_counter_track_uuids_.emplace_back(value); }
|
|
uint64_t* add_extra_counter_track_uuids() { extra_counter_track_uuids_.emplace_back(); return &extra_counter_track_uuids_.back(); }
|
|
|
|
int extra_counter_values_size() const { return static_cast<int>(extra_counter_values_.size()); }
|
|
const std::vector<int64_t>& extra_counter_values() const { return extra_counter_values_; }
|
|
std::vector<int64_t>* mutable_extra_counter_values() { return &extra_counter_values_; }
|
|
void clear_extra_counter_values() { extra_counter_values_.clear(); }
|
|
void add_extra_counter_values(int64_t value) { extra_counter_values_.emplace_back(value); }
|
|
int64_t* add_extra_counter_values() { extra_counter_values_.emplace_back(); return &extra_counter_values_.back(); }
|
|
|
|
int debug_annotations_size() const { return static_cast<int>(debug_annotations_.size()); }
|
|
const std::vector<DebugAnnotation>& debug_annotations() const { return debug_annotations_; }
|
|
std::vector<DebugAnnotation>* mutable_debug_annotations() { return &debug_annotations_; }
|
|
void clear_debug_annotations() { debug_annotations_.clear(); }
|
|
DebugAnnotation* add_debug_annotations() { debug_annotations_.emplace_back(); return &debug_annotations_.back(); }
|
|
|
|
bool has_task_execution() const { return _has_field_[5]; }
|
|
const TaskExecution& task_execution() const { return *task_execution_; }
|
|
TaskExecution* mutable_task_execution() { _has_field_.set(5); return task_execution_.get(); }
|
|
|
|
bool has_log_message() const { return _has_field_[21]; }
|
|
const LogMessage& log_message() const { return *log_message_; }
|
|
LogMessage* mutable_log_message() { _has_field_.set(21); return log_message_.get(); }
|
|
|
|
bool has_cc_scheduler_state() const { return _has_field_[24]; }
|
|
const ChromeCompositorSchedulerState& cc_scheduler_state() const { return *cc_scheduler_state_; }
|
|
ChromeCompositorSchedulerState* mutable_cc_scheduler_state() { _has_field_.set(24); return cc_scheduler_state_.get(); }
|
|
|
|
bool has_chrome_user_event() const { return _has_field_[25]; }
|
|
const ChromeUserEvent& chrome_user_event() const { return *chrome_user_event_; }
|
|
ChromeUserEvent* mutable_chrome_user_event() { _has_field_.set(25); return chrome_user_event_.get(); }
|
|
|
|
bool has_chrome_keyed_service() const { return _has_field_[26]; }
|
|
const ChromeKeyedService& chrome_keyed_service() const { return *chrome_keyed_service_; }
|
|
ChromeKeyedService* mutable_chrome_keyed_service() { _has_field_.set(26); return chrome_keyed_service_.get(); }
|
|
|
|
bool has_chrome_legacy_ipc() const { return _has_field_[27]; }
|
|
const ChromeLegacyIpc& chrome_legacy_ipc() const { return *chrome_legacy_ipc_; }
|
|
ChromeLegacyIpc* mutable_chrome_legacy_ipc() { _has_field_.set(27); return chrome_legacy_ipc_.get(); }
|
|
|
|
bool has_chrome_histogram_sample() const { return _has_field_[28]; }
|
|
const ChromeHistogramSample& chrome_histogram_sample() const { return *chrome_histogram_sample_; }
|
|
ChromeHistogramSample* mutable_chrome_histogram_sample() { _has_field_.set(28); return chrome_histogram_sample_.get(); }
|
|
|
|
bool has_chrome_latency_info() const { return _has_field_[29]; }
|
|
const ChromeLatencyInfo& chrome_latency_info() const { return *chrome_latency_info_; }
|
|
ChromeLatencyInfo* mutable_chrome_latency_info() { _has_field_.set(29); return chrome_latency_info_.get(); }
|
|
|
|
bool has_chrome_frame_reporter() const { return _has_field_[32]; }
|
|
const ChromeFrameReporter& chrome_frame_reporter() const { return *chrome_frame_reporter_; }
|
|
ChromeFrameReporter* mutable_chrome_frame_reporter() { _has_field_.set(32); return chrome_frame_reporter_.get(); }
|
|
|
|
bool has_timestamp_delta_us() const { return _has_field_[1]; }
|
|
int64_t timestamp_delta_us() const { return timestamp_delta_us_; }
|
|
void set_timestamp_delta_us(int64_t value) { timestamp_delta_us_ = value; _has_field_.set(1); }
|
|
|
|
bool has_timestamp_absolute_us() const { return _has_field_[16]; }
|
|
int64_t timestamp_absolute_us() const { return timestamp_absolute_us_; }
|
|
void set_timestamp_absolute_us(int64_t value) { timestamp_absolute_us_ = value; _has_field_.set(16); }
|
|
|
|
bool has_thread_time_delta_us() const { return _has_field_[2]; }
|
|
int64_t thread_time_delta_us() const { return thread_time_delta_us_; }
|
|
void set_thread_time_delta_us(int64_t value) { thread_time_delta_us_ = value; _has_field_.set(2); }
|
|
|
|
bool has_thread_time_absolute_us() const { return _has_field_[17]; }
|
|
int64_t thread_time_absolute_us() const { return thread_time_absolute_us_; }
|
|
void set_thread_time_absolute_us(int64_t value) { thread_time_absolute_us_ = value; _has_field_.set(17); }
|
|
|
|
bool has_thread_instruction_count_delta() const { return _has_field_[8]; }
|
|
int64_t thread_instruction_count_delta() const { return thread_instruction_count_delta_; }
|
|
void set_thread_instruction_count_delta(int64_t value) { thread_instruction_count_delta_ = value; _has_field_.set(8); }
|
|
|
|
bool has_thread_instruction_count_absolute() const { return _has_field_[20]; }
|
|
int64_t thread_instruction_count_absolute() const { return thread_instruction_count_absolute_; }
|
|
void set_thread_instruction_count_absolute(int64_t value) { thread_instruction_count_absolute_ = value; _has_field_.set(20); }
|
|
|
|
bool has_legacy_event() const { return _has_field_[6]; }
|
|
const TrackEvent_LegacyEvent& legacy_event() const { return *legacy_event_; }
|
|
TrackEvent_LegacyEvent* mutable_legacy_event() { _has_field_.set(6); return legacy_event_.get(); }
|
|
|
|
private:
|
|
std::vector<uint64_t> category_iids_;
|
|
std::vector<std::string> categories_;
|
|
uint64_t name_iid_{};
|
|
std::string name_{};
|
|
TrackEvent_Type type_{};
|
|
uint64_t track_uuid_{};
|
|
int64_t counter_value_{};
|
|
std::vector<uint64_t> extra_counter_track_uuids_;
|
|
std::vector<int64_t> extra_counter_values_;
|
|
std::vector<DebugAnnotation> debug_annotations_;
|
|
::protozero::CopyablePtr<TaskExecution> task_execution_;
|
|
::protozero::CopyablePtr<LogMessage> log_message_;
|
|
::protozero::CopyablePtr<ChromeCompositorSchedulerState> cc_scheduler_state_;
|
|
::protozero::CopyablePtr<ChromeUserEvent> chrome_user_event_;
|
|
::protozero::CopyablePtr<ChromeKeyedService> chrome_keyed_service_;
|
|
::protozero::CopyablePtr<ChromeLegacyIpc> chrome_legacy_ipc_;
|
|
::protozero::CopyablePtr<ChromeHistogramSample> chrome_histogram_sample_;
|
|
::protozero::CopyablePtr<ChromeLatencyInfo> chrome_latency_info_;
|
|
::protozero::CopyablePtr<ChromeFrameReporter> chrome_frame_reporter_;
|
|
int64_t timestamp_delta_us_{};
|
|
int64_t timestamp_absolute_us_{};
|
|
int64_t thread_time_delta_us_{};
|
|
int64_t thread_time_absolute_us_{};
|
|
int64_t thread_instruction_count_delta_{};
|
|
int64_t thread_instruction_count_absolute_{};
|
|
::protozero::CopyablePtr<TrackEvent_LegacyEvent> legacy_event_;
|
|
|
|
// Preserves unknown protobuf fields for compatibility with
// future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<33> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT TrackEvent_LegacyEvent : public ::protozero::CppMessageObj {
|
|
public:
|
|
using FlowDirection = TrackEvent_LegacyEvent_FlowDirection;
|
|
static constexpr auto FLOW_UNSPECIFIED = TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED;
|
|
static constexpr auto FLOW_IN = TrackEvent_LegacyEvent_FlowDirection_FLOW_IN;
|
|
static constexpr auto FLOW_OUT = TrackEvent_LegacyEvent_FlowDirection_FLOW_OUT;
|
|
static constexpr auto FLOW_INOUT = TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT;
|
|
static constexpr auto FlowDirection_MIN = TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED;
|
|
static constexpr auto FlowDirection_MAX = TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT;
|
|
using InstantEventScope = TrackEvent_LegacyEvent_InstantEventScope;
|
|
static constexpr auto SCOPE_UNSPECIFIED = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED;
|
|
static constexpr auto SCOPE_GLOBAL = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_GLOBAL;
|
|
static constexpr auto SCOPE_PROCESS = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_PROCESS;
|
|
static constexpr auto SCOPE_THREAD = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD;
|
|
static constexpr auto InstantEventScope_MIN = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED;
|
|
static constexpr auto InstantEventScope_MAX = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD;
|
|
enum FieldNumbers {
|
|
kNameIidFieldNumber = 1,
|
|
kPhaseFieldNumber = 2,
|
|
kDurationUsFieldNumber = 3,
|
|
kThreadDurationUsFieldNumber = 4,
|
|
kThreadInstructionDeltaFieldNumber = 15,
|
|
kUnscopedIdFieldNumber = 6,
|
|
kLocalIdFieldNumber = 10,
|
|
kGlobalIdFieldNumber = 11,
|
|
kIdScopeFieldNumber = 7,
|
|
kUseAsyncTtsFieldNumber = 9,
|
|
kBindIdFieldNumber = 8,
|
|
kBindToEnclosingFieldNumber = 12,
|
|
kFlowDirectionFieldNumber = 13,
|
|
kInstantEventScopeFieldNumber = 14,
|
|
kPidOverrideFieldNumber = 18,
|
|
kTidOverrideFieldNumber = 19,
|
|
};
|
|
|
|
TrackEvent_LegacyEvent();
|
|
~TrackEvent_LegacyEvent() override;
|
|
TrackEvent_LegacyEvent(TrackEvent_LegacyEvent&&) noexcept;
|
|
TrackEvent_LegacyEvent& operator=(TrackEvent_LegacyEvent&&);
|
|
TrackEvent_LegacyEvent(const TrackEvent_LegacyEvent&);
|
|
TrackEvent_LegacyEvent& operator=(const TrackEvent_LegacyEvent&);
|
|
bool operator==(const TrackEvent_LegacyEvent&) const;
|
|
bool operator!=(const TrackEvent_LegacyEvent& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_name_iid() const { return _has_field_[1]; }
|
|
uint64_t name_iid() const { return name_iid_; }
|
|
void set_name_iid(uint64_t value) { name_iid_ = value; _has_field_.set(1); }
|
|
|
|
bool has_phase() const { return _has_field_[2]; }
|
|
int32_t phase() const { return phase_; }
|
|
void set_phase(int32_t value) { phase_ = value; _has_field_.set(2); }
|
|
|
|
bool has_duration_us() const { return _has_field_[3]; }
|
|
int64_t duration_us() const { return duration_us_; }
|
|
void set_duration_us(int64_t value) { duration_us_ = value; _has_field_.set(3); }
|
|
|
|
bool has_thread_duration_us() const { return _has_field_[4]; }
|
|
int64_t thread_duration_us() const { return thread_duration_us_; }
|
|
void set_thread_duration_us(int64_t value) { thread_duration_us_ = value; _has_field_.set(4); }
|
|
|
|
bool has_thread_instruction_delta() const { return _has_field_[15]; }
|
|
int64_t thread_instruction_delta() const { return thread_instruction_delta_; }
|
|
void set_thread_instruction_delta(int64_t value) { thread_instruction_delta_ = value; _has_field_.set(15); }
|
|
|
|
bool has_unscoped_id() const { return _has_field_[6]; }
|
|
uint64_t unscoped_id() const { return unscoped_id_; }
|
|
void set_unscoped_id(uint64_t value) { unscoped_id_ = value; _has_field_.set(6); }
|
|
|
|
bool has_local_id() const { return _has_field_[10]; }
|
|
uint64_t local_id() const { return local_id_; }
|
|
void set_local_id(uint64_t value) { local_id_ = value; _has_field_.set(10); }
|
|
|
|
bool has_global_id() const { return _has_field_[11]; }
|
|
uint64_t global_id() const { return global_id_; }
|
|
void set_global_id(uint64_t value) { global_id_ = value; _has_field_.set(11); }
|
|
|
|
bool has_id_scope() const { return _has_field_[7]; }
|
|
const std::string& id_scope() const { return id_scope_; }
|
|
void set_id_scope(const std::string& value) { id_scope_ = value; _has_field_.set(7); }
|
|
|
|
bool has_use_async_tts() const { return _has_field_[9]; }
|
|
bool use_async_tts() const { return use_async_tts_; }
|
|
void set_use_async_tts(bool value) { use_async_tts_ = value; _has_field_.set(9); }
|
|
|
|
bool has_bind_id() const { return _has_field_[8]; }
|
|
uint64_t bind_id() const { return bind_id_; }
|
|
void set_bind_id(uint64_t value) { bind_id_ = value; _has_field_.set(8); }
|
|
|
|
bool has_bind_to_enclosing() const { return _has_field_[12]; }
|
|
bool bind_to_enclosing() const { return bind_to_enclosing_; }
|
|
void set_bind_to_enclosing(bool value) { bind_to_enclosing_ = value; _has_field_.set(12); }
|
|
|
|
bool has_flow_direction() const { return _has_field_[13]; }
|
|
TrackEvent_LegacyEvent_FlowDirection flow_direction() const { return flow_direction_; }
|
|
void set_flow_direction(TrackEvent_LegacyEvent_FlowDirection value) { flow_direction_ = value; _has_field_.set(13); }
|
|
|
|
bool has_instant_event_scope() const { return _has_field_[14]; }
|
|
TrackEvent_LegacyEvent_InstantEventScope instant_event_scope() const { return instant_event_scope_; }
|
|
void set_instant_event_scope(TrackEvent_LegacyEvent_InstantEventScope value) { instant_event_scope_ = value; _has_field_.set(14); }
|
|
|
|
bool has_pid_override() const { return _has_field_[18]; }
|
|
int32_t pid_override() const { return pid_override_; }
|
|
void set_pid_override(int32_t value) { pid_override_ = value; _has_field_.set(18); }
|
|
|
|
bool has_tid_override() const { return _has_field_[19]; }
|
|
int32_t tid_override() const { return tid_override_; }
|
|
void set_tid_override(int32_t value) { tid_override_ = value; _has_field_.set(19); }
|
|
|
|
private:
|
|
uint64_t name_iid_{};
|
|
int32_t phase_{};
|
|
int64_t duration_us_{};
|
|
int64_t thread_duration_us_{};
|
|
int64_t thread_instruction_delta_{};
|
|
uint64_t unscoped_id_{};
|
|
uint64_t local_id_{};
|
|
uint64_t global_id_{};
|
|
std::string id_scope_{};
|
|
bool use_async_tts_{};
|
|
uint64_t bind_id_{};
|
|
bool bind_to_enclosing_{};
|
|
TrackEvent_LegacyEvent_FlowDirection flow_direction_{};
|
|
TrackEvent_LegacyEvent_InstantEventScope instant_event_scope_{};
|
|
int32_t pid_override_{};
|
|
int32_t tid_override_{};
|
|
|
|
// Allows preserving unknown protobuf fields for compatibility
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<20> _has_field_{};
|
|
};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_user_event.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_latency_info.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_keyed_service.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_histogram_sample.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_frame_reporter.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/source_location.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/task_execution.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/log_message.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
EventName::EventName() = default;
|
|
EventName::~EventName() = default;
|
|
EventName::EventName(const EventName&) = default;
|
|
EventName& EventName::operator=(const EventName&) = default;
|
|
EventName::EventName(EventName&&) noexcept = default;
|
|
EventName& EventName::operator=(EventName&&) = default;
|
|
|
|
bool EventName::operator==(const EventName& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& iid_ == other.iid_
|
|
&& name_ == other.name_;
|
|
}
|
|
|
|
bool EventName::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* iid */:
|
|
field.get(&iid_);
|
|
break;
|
|
case 2 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string EventName::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> EventName::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void EventName::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: iid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, iid_);
|
|
}
|
|
|
|
// Field 2: name
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, name_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
EventCategory::EventCategory() = default;
|
|
EventCategory::~EventCategory() = default;
|
|
EventCategory::EventCategory(const EventCategory&) = default;
|
|
EventCategory& EventCategory::operator=(const EventCategory&) = default;
|
|
EventCategory::EventCategory(EventCategory&&) noexcept = default;
|
|
EventCategory& EventCategory::operator=(EventCategory&&) = default;
|
|
|
|
bool EventCategory::operator==(const EventCategory& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& iid_ == other.iid_
|
|
&& name_ == other.name_;
|
|
}
|
|
|
|
bool EventCategory::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* iid */:
|
|
field.get(&iid_);
|
|
break;
|
|
case 2 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string EventCategory::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> EventCategory::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void EventCategory::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: iid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, iid_);
|
|
}
|
|
|
|
// Field 2: name
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, name_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TrackEventDefaults::TrackEventDefaults() = default;
|
|
TrackEventDefaults::~TrackEventDefaults() = default;
|
|
TrackEventDefaults::TrackEventDefaults(const TrackEventDefaults&) = default;
|
|
TrackEventDefaults& TrackEventDefaults::operator=(const TrackEventDefaults&) = default;
|
|
TrackEventDefaults::TrackEventDefaults(TrackEventDefaults&&) noexcept = default;
|
|
TrackEventDefaults& TrackEventDefaults::operator=(TrackEventDefaults&&) = default;
|
|
|
|
bool TrackEventDefaults::operator==(const TrackEventDefaults& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& track_uuid_ == other.track_uuid_
|
|
&& extra_counter_track_uuids_ == other.extra_counter_track_uuids_;
|
|
}
|
|
|
|
bool TrackEventDefaults::ParseFromArray(const void* raw, size_t size) {
|
|
extra_counter_track_uuids_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 11 /* track_uuid */:
|
|
field.get(&track_uuid_);
|
|
break;
|
|
case 31 /* extra_counter_track_uuids */:
|
|
extra_counter_track_uuids_.emplace_back();
|
|
field.get(&extra_counter_track_uuids_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TrackEventDefaults::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TrackEventDefaults::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TrackEventDefaults::Serialize(::protozero::Message* msg) const {
|
|
// Field 11: track_uuid
|
|
if (_has_field_[11]) {
|
|
msg->AppendVarInt(11, track_uuid_);
|
|
}
|
|
|
|
// Field 31: extra_counter_track_uuids
|
|
for (auto& it : extra_counter_track_uuids_) {
|
|
msg->AppendVarInt(31, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TrackEvent::TrackEvent() = default;
|
|
TrackEvent::~TrackEvent() = default;
|
|
TrackEvent::TrackEvent(const TrackEvent&) = default;
|
|
TrackEvent& TrackEvent::operator=(const TrackEvent&) = default;
|
|
TrackEvent::TrackEvent(TrackEvent&&) noexcept = default;
|
|
TrackEvent& TrackEvent::operator=(TrackEvent&&) = default;
|
|
|
|
bool TrackEvent::operator==(const TrackEvent& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& category_iids_ == other.category_iids_
|
|
&& categories_ == other.categories_
|
|
&& name_iid_ == other.name_iid_
|
|
&& name_ == other.name_
|
|
&& type_ == other.type_
|
|
&& track_uuid_ == other.track_uuid_
|
|
&& counter_value_ == other.counter_value_
|
|
&& extra_counter_track_uuids_ == other.extra_counter_track_uuids_
|
|
&& extra_counter_values_ == other.extra_counter_values_
|
|
&& debug_annotations_ == other.debug_annotations_
|
|
&& task_execution_ == other.task_execution_
|
|
&& log_message_ == other.log_message_
|
|
&& cc_scheduler_state_ == other.cc_scheduler_state_
|
|
&& chrome_user_event_ == other.chrome_user_event_
|
|
&& chrome_keyed_service_ == other.chrome_keyed_service_
|
|
&& chrome_legacy_ipc_ == other.chrome_legacy_ipc_
|
|
&& chrome_histogram_sample_ == other.chrome_histogram_sample_
|
|
&& chrome_latency_info_ == other.chrome_latency_info_
|
|
&& chrome_frame_reporter_ == other.chrome_frame_reporter_
|
|
&& timestamp_delta_us_ == other.timestamp_delta_us_
|
|
&& timestamp_absolute_us_ == other.timestamp_absolute_us_
|
|
&& thread_time_delta_us_ == other.thread_time_delta_us_
|
|
&& thread_time_absolute_us_ == other.thread_time_absolute_us_
|
|
&& thread_instruction_count_delta_ == other.thread_instruction_count_delta_
|
|
&& thread_instruction_count_absolute_ == other.thread_instruction_count_absolute_
|
|
&& legacy_event_ == other.legacy_event_;
|
|
}
|
|
|
|
bool TrackEvent::ParseFromArray(const void* raw, size_t size) {
|
|
category_iids_.clear();
|
|
categories_.clear();
|
|
extra_counter_track_uuids_.clear();
|
|
extra_counter_values_.clear();
|
|
debug_annotations_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 3 /* category_iids */:
|
|
category_iids_.emplace_back();
|
|
field.get(&category_iids_.back());
|
|
break;
|
|
case 22 /* categories */:
|
|
categories_.emplace_back();
|
|
field.get(&categories_.back());
|
|
break;
|
|
case 10 /* name_iid */:
|
|
field.get(&name_iid_);
|
|
break;
|
|
case 23 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
case 9 /* type */:
|
|
field.get(&type_);
|
|
break;
|
|
case 11 /* track_uuid */:
|
|
field.get(&track_uuid_);
|
|
break;
|
|
case 30 /* counter_value */:
|
|
field.get(&counter_value_);
|
|
break;
|
|
case 31 /* extra_counter_track_uuids */:
|
|
extra_counter_track_uuids_.emplace_back();
|
|
field.get(&extra_counter_track_uuids_.back());
|
|
break;
|
|
case 12 /* extra_counter_values */:
|
|
extra_counter_values_.emplace_back();
|
|
field.get(&extra_counter_values_.back());
|
|
break;
|
|
case 4 /* debug_annotations */:
|
|
debug_annotations_.emplace_back();
|
|
debug_annotations_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
case 5 /* task_execution */:
|
|
(*task_execution_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 21 /* log_message */:
|
|
(*log_message_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 24 /* cc_scheduler_state */:
|
|
(*cc_scheduler_state_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 25 /* chrome_user_event */:
|
|
(*chrome_user_event_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 26 /* chrome_keyed_service */:
|
|
(*chrome_keyed_service_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 27 /* chrome_legacy_ipc */:
|
|
(*chrome_legacy_ipc_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 28 /* chrome_histogram_sample */:
|
|
(*chrome_histogram_sample_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 29 /* chrome_latency_info */:
|
|
(*chrome_latency_info_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 32 /* chrome_frame_reporter */:
|
|
(*chrome_frame_reporter_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 1 /* timestamp_delta_us */:
|
|
field.get(&timestamp_delta_us_);
|
|
break;
|
|
case 16 /* timestamp_absolute_us */:
|
|
field.get(&timestamp_absolute_us_);
|
|
break;
|
|
case 2 /* thread_time_delta_us */:
|
|
field.get(&thread_time_delta_us_);
|
|
break;
|
|
case 17 /* thread_time_absolute_us */:
|
|
field.get(&thread_time_absolute_us_);
|
|
break;
|
|
case 8 /* thread_instruction_count_delta */:
|
|
field.get(&thread_instruction_count_delta_);
|
|
break;
|
|
case 20 /* thread_instruction_count_absolute */:
|
|
field.get(&thread_instruction_count_absolute_);
|
|
break;
|
|
case 6 /* legacy_event */:
|
|
(*legacy_event_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TrackEvent::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TrackEvent::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TrackEvent::Serialize(::protozero::Message* msg) const {
|
|
// Field 3: category_iids
|
|
for (auto& it : category_iids_) {
|
|
msg->AppendVarInt(3, it);
|
|
}
|
|
|
|
// Field 22: categories
|
|
for (auto& it : categories_) {
|
|
msg->AppendString(22, it);
|
|
}
|
|
|
|
// Field 10: name_iid
|
|
if (_has_field_[10]) {
|
|
msg->AppendVarInt(10, name_iid_);
|
|
}
|
|
|
|
// Field 23: name
|
|
if (_has_field_[23]) {
|
|
msg->AppendString(23, name_);
|
|
}
|
|
|
|
// Field 9: type
|
|
if (_has_field_[9]) {
|
|
msg->AppendVarInt(9, type_);
|
|
}
|
|
|
|
// Field 11: track_uuid
|
|
if (_has_field_[11]) {
|
|
msg->AppendVarInt(11, track_uuid_);
|
|
}
|
|
|
|
// Field 30: counter_value
|
|
if (_has_field_[30]) {
|
|
msg->AppendVarInt(30, counter_value_);
|
|
}
|
|
|
|
// Field 31: extra_counter_track_uuids
|
|
for (auto& it : extra_counter_track_uuids_) {
|
|
msg->AppendVarInt(31, it);
|
|
}
|
|
|
|
// Field 12: extra_counter_values
|
|
for (auto& it : extra_counter_values_) {
|
|
msg->AppendVarInt(12, it);
|
|
}
|
|
|
|
// Field 4: debug_annotations
|
|
for (auto& it : debug_annotations_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
|
|
}
|
|
|
|
// Field 5: task_execution
|
|
if (_has_field_[5]) {
|
|
(*task_execution_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
|
|
}
|
|
|
|
// Field 21: log_message
|
|
if (_has_field_[21]) {
|
|
(*log_message_).Serialize(msg->BeginNestedMessage<::protozero::Message>(21));
|
|
}
|
|
|
|
// Field 24: cc_scheduler_state
|
|
if (_has_field_[24]) {
|
|
(*cc_scheduler_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(24));
|
|
}
|
|
|
|
// Field 25: chrome_user_event
|
|
if (_has_field_[25]) {
|
|
(*chrome_user_event_).Serialize(msg->BeginNestedMessage<::protozero::Message>(25));
|
|
}
|
|
|
|
// Field 26: chrome_keyed_service
|
|
if (_has_field_[26]) {
|
|
(*chrome_keyed_service_).Serialize(msg->BeginNestedMessage<::protozero::Message>(26));
|
|
}
|
|
|
|
// Field 27: chrome_legacy_ipc
|
|
if (_has_field_[27]) {
|
|
(*chrome_legacy_ipc_).Serialize(msg->BeginNestedMessage<::protozero::Message>(27));
|
|
}
|
|
|
|
// Field 28: chrome_histogram_sample
|
|
if (_has_field_[28]) {
|
|
(*chrome_histogram_sample_).Serialize(msg->BeginNestedMessage<::protozero::Message>(28));
|
|
}
|
|
|
|
// Field 29: chrome_latency_info
|
|
if (_has_field_[29]) {
|
|
(*chrome_latency_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(29));
|
|
}
|
|
|
|
// Field 32: chrome_frame_reporter
|
|
if (_has_field_[32]) {
|
|
(*chrome_frame_reporter_).Serialize(msg->BeginNestedMessage<::protozero::Message>(32));
|
|
}
|
|
|
|
// Field 1: timestamp_delta_us
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, timestamp_delta_us_);
|
|
}
|
|
|
|
// Field 16: timestamp_absolute_us
|
|
if (_has_field_[16]) {
|
|
msg->AppendVarInt(16, timestamp_absolute_us_);
|
|
}
|
|
|
|
// Field 2: thread_time_delta_us
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, thread_time_delta_us_);
|
|
}
|
|
|
|
// Field 17: thread_time_absolute_us
|
|
if (_has_field_[17]) {
|
|
msg->AppendVarInt(17, thread_time_absolute_us_);
|
|
}
|
|
|
|
// Field 8: thread_instruction_count_delta
|
|
if (_has_field_[8]) {
|
|
msg->AppendVarInt(8, thread_instruction_count_delta_);
|
|
}
|
|
|
|
// Field 20: thread_instruction_count_absolute
|
|
if (_has_field_[20]) {
|
|
msg->AppendVarInt(20, thread_instruction_count_absolute_);
|
|
}
|
|
|
|
// Field 6: legacy_event
|
|
if (_has_field_[6]) {
|
|
(*legacy_event_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
TrackEvent_LegacyEvent::TrackEvent_LegacyEvent() = default;
|
|
TrackEvent_LegacyEvent::~TrackEvent_LegacyEvent() = default;
|
|
TrackEvent_LegacyEvent::TrackEvent_LegacyEvent(const TrackEvent_LegacyEvent&) = default;
|
|
TrackEvent_LegacyEvent& TrackEvent_LegacyEvent::operator=(const TrackEvent_LegacyEvent&) = default;
|
|
TrackEvent_LegacyEvent::TrackEvent_LegacyEvent(TrackEvent_LegacyEvent&&) noexcept = default;
|
|
TrackEvent_LegacyEvent& TrackEvent_LegacyEvent::operator=(TrackEvent_LegacyEvent&&) = default;
|
|
|
|
bool TrackEvent_LegacyEvent::operator==(const TrackEvent_LegacyEvent& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& name_iid_ == other.name_iid_
|
|
&& phase_ == other.phase_
|
|
&& duration_us_ == other.duration_us_
|
|
&& thread_duration_us_ == other.thread_duration_us_
|
|
&& thread_instruction_delta_ == other.thread_instruction_delta_
|
|
&& unscoped_id_ == other.unscoped_id_
|
|
&& local_id_ == other.local_id_
|
|
&& global_id_ == other.global_id_
|
|
&& id_scope_ == other.id_scope_
|
|
&& use_async_tts_ == other.use_async_tts_
|
|
&& bind_id_ == other.bind_id_
|
|
&& bind_to_enclosing_ == other.bind_to_enclosing_
|
|
&& flow_direction_ == other.flow_direction_
|
|
&& instant_event_scope_ == other.instant_event_scope_
|
|
&& pid_override_ == other.pid_override_
|
|
&& tid_override_ == other.tid_override_;
|
|
}
|
|
|
|
bool TrackEvent_LegacyEvent::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name_iid */:
|
|
field.get(&name_iid_);
|
|
break;
|
|
case 2 /* phase */:
|
|
field.get(&phase_);
|
|
break;
|
|
case 3 /* duration_us */:
|
|
field.get(&duration_us_);
|
|
break;
|
|
case 4 /* thread_duration_us */:
|
|
field.get(&thread_duration_us_);
|
|
break;
|
|
case 15 /* thread_instruction_delta */:
|
|
field.get(&thread_instruction_delta_);
|
|
break;
|
|
case 6 /* unscoped_id */:
|
|
field.get(&unscoped_id_);
|
|
break;
|
|
case 10 /* local_id */:
|
|
field.get(&local_id_);
|
|
break;
|
|
case 11 /* global_id */:
|
|
field.get(&global_id_);
|
|
break;
|
|
case 7 /* id_scope */:
|
|
field.get(&id_scope_);
|
|
break;
|
|
case 9 /* use_async_tts */:
|
|
field.get(&use_async_tts_);
|
|
break;
|
|
case 8 /* bind_id */:
|
|
field.get(&bind_id_);
|
|
break;
|
|
case 12 /* bind_to_enclosing */:
|
|
field.get(&bind_to_enclosing_);
|
|
break;
|
|
case 13 /* flow_direction */:
|
|
field.get(&flow_direction_);
|
|
break;
|
|
case 14 /* instant_event_scope */:
|
|
field.get(&instant_event_scope_);
|
|
break;
|
|
case 18 /* pid_override */:
|
|
field.get(&pid_override_);
|
|
break;
|
|
case 19 /* tid_override */:
|
|
field.get(&tid_override_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TrackEvent_LegacyEvent::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TrackEvent_LegacyEvent::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TrackEvent_LegacyEvent::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name_iid
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, name_iid_);
|
|
}
|
|
|
|
// Field 2: phase
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, phase_);
|
|
}
|
|
|
|
// Field 3: duration_us
|
|
if (_has_field_[3]) {
|
|
msg->AppendVarInt(3, duration_us_);
|
|
}
|
|
|
|
// Field 4: thread_duration_us
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, thread_duration_us_);
|
|
}
|
|
|
|
// Field 15: thread_instruction_delta
|
|
if (_has_field_[15]) {
|
|
msg->AppendVarInt(15, thread_instruction_delta_);
|
|
}
|
|
|
|
// Field 6: unscoped_id
|
|
if (_has_field_[6]) {
|
|
msg->AppendVarInt(6, unscoped_id_);
|
|
}
|
|
|
|
// Field 10: local_id
|
|
if (_has_field_[10]) {
|
|
msg->AppendVarInt(10, local_id_);
|
|
}
|
|
|
|
// Field 11: global_id
|
|
if (_has_field_[11]) {
|
|
msg->AppendVarInt(11, global_id_);
|
|
}
|
|
|
|
// Field 7: id_scope
|
|
if (_has_field_[7]) {
|
|
msg->AppendString(7, id_scope_);
|
|
}
|
|
|
|
// Field 9: use_async_tts
|
|
if (_has_field_[9]) {
|
|
msg->AppendTinyVarInt(9, use_async_tts_);
|
|
}
|
|
|
|
// Field 8: bind_id
|
|
if (_has_field_[8]) {
|
|
msg->AppendVarInt(8, bind_id_);
|
|
}
|
|
|
|
// Field 12: bind_to_enclosing
|
|
if (_has_field_[12]) {
|
|
msg->AppendTinyVarInt(12, bind_to_enclosing_);
|
|
}
|
|
|
|
// Field 13: flow_direction
|
|
if (_has_field_[13]) {
|
|
msg->AppendVarInt(13, flow_direction_);
|
|
}
|
|
|
|
// Field 14: instant_event_scope
|
|
if (_has_field_[14]) {
|
|
msg->AppendVarInt(14, instant_event_scope_);
|
|
}
|
|
|
|
// Field 18: pid_override
|
|
if (_has_field_[18]) {
|
|
msg->AppendVarInt(18, pid_override_);
|
|
}
|
|
|
|
// Field 19: tid_override
|
|
if (_has_field_[19]) {
|
|
msg->AppendVarInt(19, tid_override_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_log_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_polled_state_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/packages_list_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/ftrace/ftrace_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/gpu_counter_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/vulkan_memory_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/inode_file/inode_file_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/power/android_power_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/process_stats/process_stats_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/heapprofd_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/java_hprof_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/perf_event_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/sys_stats/sys_stats_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/track_event/track_event_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/chrome_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/data_source_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/stress_test_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/test_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/trace_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/clock_snapshot.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/trigger.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/system_info.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/android_log.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/gpu_mem_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/graphics_frame_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/initial_display_state.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/packages_list.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/chrome/chrome_benchmark_metadata.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/chrome/chrome_metadata.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/chrome/chrome_trace_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/filesystem/inode_file_map.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace_event_bundle.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace_stats.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/test_bundle_wrapper.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/generic.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/binder.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/block.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/cgroup.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/clk.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/compaction.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/cpuhp.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ext4.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/f2fs.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/fence.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/filemap.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/gpu_mem.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/i2c.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ion.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ipi.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/irq.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/kmem.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/lowmemorykiller.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/mdss.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/mm_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/oom.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/power.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/raw_syscalls.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/regulator.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/sched.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/scm.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/sde.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/signal.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/sync.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/systrace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/task.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/thermal.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/vmscan.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/workqueue.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/perfetto/perfetto_metatrace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/perfetto/tracing_service_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/power/battery_counters.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/power/power_rails.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ps/process_stats.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ps/process_tree.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/sys_stats/sys_stats.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/system_info/cpu_info.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/trace_packet_defaults.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/test_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/trace_packet.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/trace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/extension_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/memory_graph.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: src/tracing/trace_writer_base.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// This destructor needs to be defined in a dedicated translation unit and
// cannot be merged together with the other ones in virtual_destructors.cc.
// This is because trace_writer_base.h/cc is part of a separate target
// (src/public:common) that is also linked by other parts of the codebase.
|
|
|
|
TraceWriterBase::~TraceWriterBase() = default;
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/id_allocator.cc
|
|
// gen_amalgamated begin header: src/tracing/core/id_allocator.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_ID_ALLOCATOR_H_
|
|
#define SRC_TRACING_CORE_ID_ALLOCATOR_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <type_traits>
|
|
#include <vector>
|
|
|
|
namespace perfetto {
|
|
|
|
// Handles assignment of IDs (int types) from a fixed-size pool.
// Zero is not considered a valid ID.
// The base class always takes a uint32_t and the derived class casts and checks
// bounds at compile time. This is to avoid bloating code with different
// instances of the main class for each size.
|
|
class IdAllocatorGeneric {
|
|
public:
|
|
// |max_id| is inclusive.
|
|
explicit IdAllocatorGeneric(uint32_t max_id);
|
|
~IdAllocatorGeneric();
|
|
|
|
// Returns an ID in the range [1, max_id] or 0 if no more ids are available.
|
|
uint32_t AllocateGeneric();
|
|
void FreeGeneric(uint32_t);
|
|
|
|
private:
|
|
IdAllocatorGeneric(const IdAllocatorGeneric&) = delete;
|
|
IdAllocatorGeneric& operator=(const IdAllocatorGeneric&) = delete;
|
|
|
|
const uint32_t max_id_;
|
|
uint32_t last_id_ = 0;
|
|
std::vector<bool> ids_;
|
|
};
|
|
|
|
template <typename T = uint32_t>
|
|
class IdAllocator : public IdAllocatorGeneric {
|
|
public:
|
|
explicit IdAllocator(T end) : IdAllocatorGeneric(end) {
|
|
static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
|
|
"T must be an unsigned integer");
|
|
static_assert(sizeof(T) <= sizeof(uint32_t), "T is too big");
|
|
}
|
|
|
|
T Allocate() { return static_cast<T>(AllocateGeneric()); }
|
|
void Free(T id) { FreeGeneric(id); }
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_ID_ALLOCATOR_H_
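// Illustrative usage sketch (not part of the generated amalgamation): shows how
// the IdAllocator declared above hands out and recycles IDs from a fixed pool.
// The namespace, function name and kMaxWriters constant below are hypothetical,
// chosen only for this example.
namespace perfetto_id_allocator_example {

inline void AllocateAndRecycleWriterIds() {
  // IDs are drawn from [1, kMaxWriters]; 0 means the pool is exhausted.
  constexpr uint16_t kMaxWriters = 1023;
  perfetto::IdAllocator<uint16_t> allocator(kMaxWriters);

  uint16_t first = allocator.Allocate();   // First call returns 1.
  uint16_t second = allocator.Allocate();  // Next call returns 2.

  // Freeing an ID makes it available again to a later Allocate() call.
  allocator.Free(first);
  (void)second;
}

}  // namespace perfetto_id_allocator_example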
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/id_allocator.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
|
|
IdAllocatorGeneric::IdAllocatorGeneric(uint32_t max_id) : max_id_(max_id) {
|
|
PERFETTO_DCHECK(max_id > 1);
|
|
}
|
|
|
|
IdAllocatorGeneric::~IdAllocatorGeneric() = default;
|
|
|
|
uint32_t IdAllocatorGeneric::AllocateGeneric() {
|
|
for (uint32_t ignored = 1; ignored <= max_id_; ignored++) {
|
|
last_id_ = last_id_ < max_id_ ? last_id_ + 1 : 1;
|
|
const auto id = last_id_;
|
|
|
|
// 0 is never a valid ID. So if we are looking for |id| == N and there are
|
|
// N or fewer elements in the vector, they must necessarily be all < N.
|
|
// e.g. if |id| == 4 and size() == 4, the vector will contain IDs 0,1,2,3.
|
|
if (id >= ids_.size()) {
|
|
ids_.resize(id + 1);
|
|
ids_[id] = true;
|
|
return id;
|
|
}
|
|
|
|
if (!ids_[id]) {
|
|
ids_[id] = true;
|
|
return id;
|
|
}
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
void IdAllocatorGeneric::FreeGeneric(uint32_t id) {
|
|
if (id == 0 || id >= ids_.size() || !ids_[id]) {
|
|
PERFETTO_DFATAL("Invalid id.");
|
|
return;
|
|
}
|
|
ids_[id] = false;
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/null_trace_writer.cc
|
|
// gen_amalgamated begin header: src/tracing/core/null_trace_writer.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/trace_writer.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/basic_types.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_BASIC_TYPES_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_BASIC_TYPES_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
#include <sys/types.h>
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
using uid_t = unsigned int;
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
|
|
// Unique within the scope of the tracing service.
|
|
using TracingSessionID = uint64_t;
|
|
|
|
// Unique within the scope of the tracing service.
|
|
using ProducerID = uint16_t;
|
|
|
|
// Unique within the scope of the tracing service.
|
|
using DataSourceInstanceID = uint64_t;
|
|
|
|
// Unique within the scope of a Producer.
|
|
using WriterID = uint16_t;
|
|
|
|
// Unique within the scope of the tracing service.
|
|
using FlushRequestID = uint64_t;
|
|
|
|
// We need one FD per producer and we are not going to be able to keep > 64k FDs
|
|
// open in the service.
|
|
static constexpr ProducerID kMaxProducerID = static_cast<ProducerID>(-1);
|
|
|
|
// 1024 Writers per producer seems a reasonable bound. This reduces the ability
// to memory-DoS the service by having to keep track of too many writer IDs.
|
|
static constexpr WriterID kMaxWriterID = static_cast<WriterID>((1 << 10) - 1);
|
|
|
|
// Unique within the scope of a {ProducerID, WriterID} tuple.
|
|
using ChunkID = uint32_t;
|
|
static constexpr ChunkID kMaxChunkID = static_cast<ChunkID>(-1);
|
|
|
|
// Unique within the scope of the tracing service.
|
|
using BufferID = uint16_t;
|
|
|
|
// Target buffer ID for SharedMemoryArbiter. Values up to max uint16_t are
|
|
// equivalent to a bound BufferID. Values above max uint16_t are reservation IDs
|
|
// for the target buffer of a startup trace writer. Reservation IDs will be
|
|
// translated to actual BufferIDs after they are bound by
|
|
// SharedMemoryArbiter::BindStartupTargetBuffer().
|
|
using MaybeUnboundBufferID = uint32_t;
|
|
|
|
// Keep this in sync with SharedMemoryABI::PageHeader::target_buffer.
|
|
static constexpr BufferID kMaxTraceBufferID = static_cast<BufferID>(-1);
|
|
|
|
// Unique within the scope of a tracing session.
|
|
using PacketSequenceID = uint32_t;
|
|
// Used for extra packets emitted by the service, such as statistics.
|
|
static constexpr PacketSequenceID kServicePacketSequenceID = 1;
|
|
static constexpr PacketSequenceID kMaxPacketSequenceID =
|
|
static_cast<PacketSequenceID>(-1);
|
|
|
|
constexpr uid_t kInvalidUid = static_cast<uid_t>(-1);
|
|
|
|
constexpr uint32_t kDefaultFlushTimeoutMs = 5000;
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_BASIC_TYPES_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_WRITER_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_WRITER_H_
|
|
|
|
#include <functional>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace protos {
|
|
namespace pbzero {
|
|
class TracePacket;
|
|
} // namespace pbzero
|
|
} // namespace protos
|
|
|
|
// This is a single-thread write interface that allows writing protobufs
|
|
// directly into the tracing shared buffer without making any copies.
|
|
// It takes care of acquiring and releasing chunks from the
|
|
// SharedMemoryArbiter and splitting protos over chunks.
|
|
// The idea is that each data source creates one (or more) TraceWriter for each
|
|
// thread it wants to write from. Each TraceWriter will get its own dedicated
|
|
// chunk and will write into the shared buffer without any locking most of the
|
|
// time. Locking will happen only when a chunk is exhausted and a new one is
|
|
// acquired from the arbiter.
|
|
|
|
// TODO: TraceWriter needs to keep the shared memory buffer alive (refcount?).
|
|
// Otherwise if the shared memory buffer goes away (e.g. the Service crashes)
|
|
// the TraceWriter will keep writing into unmapped memory.
|
|
|
|
class PERFETTO_EXPORT TraceWriter : public TraceWriterBase {
|
|
public:
|
|
using TracePacketHandle =
|
|
protozero::MessageHandle<protos::pbzero::TracePacket>;
|
|
|
|
TraceWriter();
|
|
~TraceWriter() override;
|
|
|
|
// Returns a handle to the root proto message for the trace. The message will
// be finalized either by calling handle.Finalize() directly or by letting the
// handle go out of scope. The returned handle can be std::move()'d but cannot
// be used after either: (i) the TraceWriter instance is destroyed, (ii) a
// subsequent NewTracePacket() call is made on the same TraceWriter instance.
|
|
TracePacketHandle NewTracePacket() override = 0;
|
|
|
|
// Commits the data pending for the current chunk into the shared memory
|
|
// buffer and sends a CommitDataRequest() to the service. This can be called
|
|
// only if the handle returned by NewTracePacket() has been destroyed (i.e. we
|
|
// cannot Flush() while writing a TracePacket).
|
|
// Note: Flush() also happens implicitly when destroying the TraceWriter.
|
|
// |callback| is an optional callback. When non-null it will request the
|
|
// service to ACK the flush and will be invoked after the service has
|
|
// acknowledged it. The callback might be NEVER INVOKED if the service crashes
|
|
// or the IPC connection is dropped. The callback should be used only by tests
|
|
// and best-effort features (logging).
|
|
// TODO(primiano): right now the |callback| will be called on the IPC thread.
|
|
// This is fine in the current single-thread scenario, but long-term
|
|
// trace_writer_impl.cc should be smarter and post it on the right thread.
|
|
void Flush(std::function<void()> callback = {}) override = 0;
|
|
|
|
virtual WriterID writer_id() const = 0;
|
|
|
|
// Bytes written since creation. Is not reset when new chunks are acquired.
|
|
virtual uint64_t written() const override = 0;
|
|
|
|
private:
|
|
TraceWriter(const TraceWriter&) = delete;
|
|
TraceWriter& operator=(const TraceWriter&) = delete;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_WRITER_H_
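// Illustrative sketch (not part of the generated amalgamation): the intended
// call pattern on the abstract TraceWriter interface documented above. The
// namespace and function name are hypothetical; a concrete writer (e.g. one
// obtained from a SharedMemoryArbiter) would be supplied by the caller.
namespace perfetto_trace_writer_example {

inline void EmitOnePacketAndFlush(perfetto::TraceWriter* writer) {
  {
    // NewTracePacket() hands back a handle to the root TracePacket proto.
    perfetto::TraceWriter::TracePacketHandle packet = writer->NewTracePacket();
    // The packet is finalized when |packet| goes out of scope; only after that
    // may NewTracePacket() be called again or Flush() be issued.
  }
  // The optional callback is invoked once the service ACKs the flush; it is
  // best-effort and may never run if the connection is dropped.
  writer->Flush([] {});
}

}  // namespace perfetto_trace_writer_example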
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_NULL_TRACE_WRITER_H_
|
|
#define SRC_TRACING_CORE_NULL_TRACE_WRITER_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_null_delegate.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// A specialization of TraceWriter which no-ops all the writes, routing them
// into a fixed region of memory.
|
|
// See //include/perfetto/tracing/core/trace_writer.h for docs.
|
|
class NullTraceWriter : public TraceWriter {
|
|
public:
|
|
NullTraceWriter();
|
|
~NullTraceWriter() override;
|
|
|
|
// TraceWriter implementation. See documentation in trace_writer.h.
|
|
// TracePacketHandle is defined in trace_writer.h
|
|
TracePacketHandle NewTracePacket() override;
|
|
void Flush(std::function<void()> callback = {}) override;
|
|
WriterID writer_id() const override;
|
|
uint64_t written() const override;
|
|
|
|
private:
|
|
NullTraceWriter(const NullTraceWriter&) = delete;
|
|
NullTraceWriter& operator=(const NullTraceWriter&) = delete;
|
|
|
|
protozero::ScatteredStreamWriterNullDelegate delegate_;
|
|
protozero::ScatteredStreamWriter stream_;
|
|
|
|
// The packet returned via NewTracePacket(). It's owned by this class;
// TracePacketHandle has just a pointer to it.
|
|
std::unique_ptr<protozero::RootMessage<protos::pbzero::TracePacket>>
|
|
cur_packet_;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_NULL_TRACE_WRITER_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/null_trace_writer.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
NullTraceWriter::NullTraceWriter() : delegate_(4096), stream_(&delegate_) {
|
|
cur_packet_.reset(new protozero::RootMessage<protos::pbzero::TracePacket>());
|
|
cur_packet_->Finalize(); // To avoid the DCHECK in NewTracePacket().
|
|
}
|
|
|
|
NullTraceWriter::~NullTraceWriter() {}
|
|
|
|
void NullTraceWriter::Flush(std::function<void()> callback) {
|
|
// Flush() cannot be called in the middle of a TracePacket.
|
|
PERFETTO_CHECK(cur_packet_->is_finalized());
|
|
|
|
if (callback)
|
|
callback();
|
|
}
|
|
|
|
NullTraceWriter::TracePacketHandle NullTraceWriter::NewTracePacket() {
|
|
// If we hit this, the caller is calling NewTracePacket() without having
|
|
// finalized the previous packet.
|
|
PERFETTO_DCHECK(cur_packet_->is_finalized());
|
|
cur_packet_->Reset(&stream_);
|
|
return TraceWriter::TracePacketHandle(cur_packet_.get());
|
|
}
|
|
|
|
WriterID NullTraceWriter::writer_id() const {
|
|
return 0;
|
|
}
|
|
|
|
uint64_t NullTraceWriter::written() const {
|
|
return 0;
|
|
}
|
|
|
|
} // namespace perfetto
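// Illustrative sketch (not part of the generated amalgamation): exercises the
// NullTraceWriter above, which satisfies the TraceWriter contract while
// discarding every byte written. The namespace and function name are
// hypothetical.
namespace perfetto_null_trace_writer_example {

inline void WriteIntoTheVoid() {
  perfetto::NullTraceWriter writer;
  {
    // The handle must be finalized (here: by scope exit) before Flush().
    auto packet = writer.NewTracePacket();
  }
  // NullTraceWriter invokes the callback inline; a real writer would do so
  // only after the service acknowledged the flush.
  bool flushed = false;
  writer.Flush([&flushed] { flushed = true; });
  (void)flushed;  // true at this point.
}

}  // namespace perfetto_null_trace_writer_example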
|
|
// gen_amalgamated begin source: src/tracing/core/shared_memory_abi.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/shared_memory_abi.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ABI_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ABI_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
#include <array>
|
|
#include <atomic>
|
|
#include <bitset>
|
|
#include <thread>
|
|
#include <type_traits>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// This file defines the binary interface of the memory buffers shared between
|
|
// Producer and Service. This is a long-term stable ABI and has to be backwards
|
|
// compatible to deal with mismatching Producer and Service versions.
|
|
//
|
|
// Overview
|
|
// --------
|
|
// SMB := "Shared Memory Buffer".
|
|
// In the most typical case of a multi-process architecture (i.e. Producer and
|
|
// Service are hosted by different processes), a Producer almost always means
|
|
// a "client process producing data" (almost: in some cases a process might host
|
|
// > 1 Producer, if it links two libraries, independent of each other, that both
|
|
// use Perfetto tracing).
|
|
// The Service has one SMB for each Producer.
|
|
// A producer has one or (typically) more data sources. They all share the same
|
|
// SMB.
|
|
// The SMB is a staging area to decouple data sources living in the Producer
|
|
// and allow them to do non-blocking async writes.
|
|
// The SMB is *not* the ultimate logging buffer seen by the Consumer. That one
|
|
// is larger (~MBs) and not shared with Producers.
|
|
// Each SMB is small, typically a few KB. Its size is configurable by the producer
|
|
// within a max limit of ~MB (see kMaxShmSize in tracing_service_impl.cc).
|
|
// The SMB is partitioned into fixed-size Page(s). The size of the Pages is
// determined by each Producer at connection time and cannot be changed.
|
|
// Hence, different producers can have SMB(s) that have a different Page size
|
|
// from each other, but the page size will be constant throughout all the
|
|
// lifetime of the SMB.
|
|
// Page(s) are partitioned by the Producer into variable size Chunk(s):
|
|
//
|
|
// +------------+ +--------------------------+
|
|
// | Producer 1 | <-> | SMB 1 [~32K - 1MB] |
|
|
// +------------+ +--------+--------+--------+
|
|
// | Page | Page | Page |
|
|
// +--------+--------+--------+
|
|
// | Chunk | | Chunk |
|
|
// +--------+ Chunk +--------+ <----+
|
|
// | Chunk | | Chunk | |
|
|
// +--------+--------+--------+ +---------------------+
|
|
// | Service |
|
|
// +------------+ +--------------------------+ +---------------------+
|
|
// | Producer 2 | <-> | SMB 2 [~32K - 1MB] | /| large ring buffers |
|
|
// +------------+ +--------+--------+--------+ <--+ | (100K - several MB) |
|
|
// | Page | Page | Page | +---------------------+
|
|
// +--------+--------+--------+
|
|
// | Chunk | | Chunk |
|
|
// +--------+ Chunk +--------+
|
|
// | Chunk | | Chunk |
|
|
// +--------+--------+--------+
|
|
//
|
|
// * Sizes of both SMB and ring buffers are purely indicative and decided at
|
|
// configuration time by the Producer (for SMB sizes) and the Consumer (for the
|
|
// final ring buffer size).
|
|
|
|
// Page
|
|
// ----
|
|
// A page is a portion of the shared memory buffer and defines the granularity
|
|
// of the interaction between the Producer and tracing Service. When scanning
|
|
// the shared memory buffer to determine if something should be moved to the
|
|
// central logging buffers, the Service most of the time looks at and moves
|
|
// whole pages. Similarly, the Producer sends an IPC to invite the Service to
|
|
// drain the shared memory buffer only when a whole page is filled.
|
|
// Having fixed the total SMB size (hence the total memory overhead), the page
|
|
// size is a triangular tradeoff between:
|
|
// 1) IPC traffic: smaller pages -> more IPCs.
|
|
// 2) Producer lock freedom: larger pages -> larger chunks -> data sources can
|
|
// write more data without needing to swap chunks and synchronize.
|
|
// 3) Risk of write-starving the SMB: larger pages -> higher chance that the
|
|
// Service won't manage to drain them and the SMB remains full.
|
|
// The page size, on the other hand, has no implications on wasted memory due
// to fragmentation (see Chunk below).
|
|
// The size of the page is chosen by the Service at connection time and stays
|
|
// fixed throughout all the lifetime of the Producer. Different producers (i.e.
|
|
// ~ different client processes) can use different page sizes.
|
|
// The page size must be an integer multiple of 4k (this is to allow VM page
|
|
// stealing optimizations) and obviously has to be an integer divisor of the
|
|
// total SMB size.
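// For illustration (just arithmetic on the constraints above): a 32 KB SMB
// with 4 KB pages yields 8 pages; a 6 KB page size would be invalid (not a
// multiple of 4 KB), and a 12 KB page size would be invalid for a 32 KB SMB
// (not an integer divisor of the SMB size).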
|
|
|
|
// Chunk
|
|
// -----
|
|
// A chunk is a portion of a Page which is written and handled by a Producer.
|
|
// A chunk contains a linear sequence of TracePacket(s) (the root proto).
|
|
// A chunk cannot be written concurrently by two data sources. Protobufs must be
|
|
// encoded as contiguous byte streams and cannot be interleaved. Therefore, on
|
|
// the Producer side, a chunk is almost always owned exclusively by one thread
|
|
// (% extremely peculiar slow-path cases).
|
|
// Chunks are essentially single-writer single-thread lock-free arenas. Locking
|
|
// happens only when a Chunk is full and a new one needs to be acquired.
|
|
// Locking happens only within the scope of a Producer process. There is no
|
|
// inter-process locking. The Producer cannot lock the Service and vice versa.
|
|
// In the worst case, any of the two can starve the SMB, by marking all chunks
|
|
// as either being read or written. But that has the only side effect of
|
|
// losing the trace data.
|
|
// The Producer can decide to partition each page into one of a limited number
// of configurations (e.g., 1 page == 1 chunk, 1 page == 2 chunks and so on).
|
|
|
|
// TracePacket
|
|
// -----------
|
|
// The TracePacket is the atom of tracing. Putting aside pages and chunks, a
// trace is merely a sequence of TracePacket(s). TracePacket is the root
// protobuf message.
|
|
// A TracePacket can span across several chunks (hence even across several
|
|
// pages). A TracePacket can therefore be >> chunk size, >> page size and even
|
|
// >> SMB size. The Chunk header carries metadata to deal with the TracePacket
|
|
// splitting case.
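// For example (illustrative): a TracePacket larger than a chunk is written as
// a series of fragments; each chunk that ends mid-packet sets the
// kLastPacketContinuesOnNextChunk flag and the next chunk (same |writer_id|,
// |chunk_id| + 1) sets kFirstPacketContinuesFromPrevChunk, so the Service can
// stitch the fragments back together (see ChunkHeader::Flags below).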
|
|
|
|
// Use only explicitly-sized types below. DO NOT use size_t or any other
// architecture-dependent size in the struct fields. This buffer will be read
// and written by processes that have a different bitness in the same OS.
// Instead it's fine to assume little-endianness. Big-endian is a dream we are
// not currently pursuing.
|
|
|
|
class SharedMemoryABI {
|
|
public:
|
|
static constexpr size_t kMinPageSize = 4 * 1024;
|
|
|
|
// This is due to Chunk::size being 16 bits.
|
|
static constexpr size_t kMaxPageSize = 64 * 1024;
|
|
|
|
// "14" is the max number that can be encoded in a 32 bit atomic word using
|
|
// 2 state bits per Chunk and leaving 4 bits for the page layout.
|
|
// See PageLayout below.
|
|
static constexpr size_t kMaxChunksPerPage = 14;
|
|
|
|
// Each TracePacket in the Chunk is prefixed by a 4-byte redundant VarInt
|
|
// (see proto_utils.h) stating its size.
|
|
static constexpr size_t kPacketHeaderSize = 4;
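// For illustration (assuming protozero's redundant varint encoding): a packet
// of size 1 is prefixed by the four bytes {0x81, 0x80, 0x80, 0x00}, i.e. the
// varint "1" padded to a fixed width with continuation bytes, so the size can
// be back-filled in place once the packet is finalized.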
|
|
|
|
// TraceWriter specifies this invalid packet/fragment size to signal to the
|
|
// service that a packet should be discarded, because the TraceWriter couldn't
|
|
// write its remaining fragments (e.g. because the SMB was exhausted).
|
|
static constexpr size_t kPacketSizeDropPacket =
|
|
protozero::proto_utils::kMaxMessageLength;
|
|
|
|
// Chunk states and transitions:
|
|
// kChunkFree <----------------+
|
|
// | (Producer) |
|
|
// V |
|
|
// kChunkBeingWritten |
|
|
// | (Producer) |
|
|
// V |
|
|
// kChunkComplete |
|
|
// | (Service) |
|
|
// V |
|
|
// kChunkBeingRead |
|
|
// | (Service) |
|
|
// +------------------------+
|
|
enum ChunkState : uint32_t {
|
|
// The Chunk is free. The Service shall never touch it, the Producer can
|
|
// acquire it and transition it into kChunkBeingWritten.
|
|
kChunkFree = 0,
|
|
|
|
// The Chunk is being used by the Producer and is not complete yet.
|
|
// The Service shall never touch kChunkBeingWritten pages.
|
|
kChunkBeingWritten = 1,
|
|
|
|
// The Service is moving the page into its non-shared ring buffer. The
|
|
// Producer shall never touch kChunkBeingRead pages.
|
|
kChunkBeingRead = 2,
|
|
|
|
// The Producer is done writing the page and won't touch it again. The
|
|
// Service can now move it to its non-shared ring buffer.
|
|
// kAllChunksComplete relies on this being == 3.
|
|
kChunkComplete = 3,
|
|
};
|
|
static constexpr const char* kChunkStateStr[] = {"Free", "BeingWritten",
|
|
"BeingRead", "Complete"};
|
|
|
|
enum PageLayout : uint32_t {
|
|
// The page is fully free and has not been partitioned yet.
|
|
kPageNotPartitioned = 0,
|
|
|
|
// TODO(primiano): Aligning a chunk @ 16 bytes could allow to use faster
|
|
// intrinsics based on quad-word moves. Do the math and check what is the
|
|
// fragmentation loss.
|
|
|
|
// align4(X) := the largest integer N s.t. (N % 4) == 0 && N <= X.
|
|
// 8 == sizeof(PageHeader).
|
|
kPageDiv1 = 1, // Only one chunk of size: PAGE_SIZE - 8.
|
|
kPageDiv2 = 2, // Two chunks of size: align4((PAGE_SIZE - 8) / 2).
|
|
kPageDiv4 = 3, // Four chunks of size: align4((PAGE_SIZE - 8) / 4).
|
|
kPageDiv7 = 4, // Seven chunks of size: align4((PAGE_SIZE - 8) / 7).
|
|
kPageDiv14 = 5, // Fourteen chunks of size: align4((PAGE_SIZE - 8) / 14).
|
|
|
|
// The rationale for 7 and 14 above is to maximize the page usage for the
|
|
// likely case of |page_size| == 4096:
|
|
// (((4096 - 8) / 14) % 4) == 0, while (((4096 - 8) / 16) % 4) == 3. So
|
|
// Div16 would waste 3 * 16 = 48 bytes per page for chunk alignment gaps.
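// For illustration, with |page_size| == 4096 (8 bytes of PageHeader):
//   kPageDiv1  ->  1 chunk  of 4088 bytes
//   kPageDiv2  ->  2 chunks of 2044 bytes
//   kPageDiv4  ->  4 chunks of 1020 bytes (align4(1022))
//   kPageDiv7  ->  7 chunks of  584 bytes
//   kPageDiv14 -> 14 chunks of  292 bytes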
|
|
|
|
kPageDivReserved1 = 6,
|
|
kPageDivReserved2 = 7,
|
|
kNumPageLayouts = 8,
|
|
};
|
|
|
|
// Keep this consistent with the PageLayout enum above.
|
|
static constexpr uint32_t kNumChunksForLayout[] = {0, 1, 2, 4, 7, 14, 0, 0};
|
|
|
|
// Layout of a Page.
|
|
// +===================================================+
|
|
// | Page header [8 bytes] |
|
|
// | Tells how many chunks there are, how big they are |
|
|
// | and their state (free, read, write, complete). |
|
|
// +===================================================+
|
|
// +***************************************************+
|
|
// | Chunk #0 header [8 bytes] |
|
|
// | Tells how many packets there are and whether      |
// | the 1st and last ones are fragmented.             |
|
|
// | Also has a chunk id to reassemble fragments. |
|
|
// +***************************************************+
|
|
// +---------------------------------------------------+
|
|
// | Packet #0 size [varint, up to 4 bytes] |
|
|
// + - - - - - - - - - - - - - - - - - - - - - - - - - +
|
|
// | Packet #0 payload |
|
|
// | A TracePacket protobuf message |
|
|
// +---------------------------------------------------+
|
|
// ...
|
|
// + . . . . . . . . . . . . . . . . . . . . . . . . . +
|
|
// | Optional padding to maintain alignment           |
|
|
// + . . . . . . . . . . . . . . . . . . . . . . . . . +
|
|
// +---------------------------------------------------+
|
|
// | Packet #N size [varint, up to 4 bytes] |
|
|
// + - - - - - - - - - - - - - - - - - - - - - - - - - +
|
|
// | Packet #N payload |
|
|
// | A TracePacket protobuf message |
|
|
// +---------------------------------------------------+
|
|
// ...
|
|
// +***************************************************+
|
|
// | Chunk #M header [8 bytes] |
|
|
// ...
|
|
|
|
// Alignment applies to start offset only. The Chunk size is *not* aligned.
|
|
static constexpr uint32_t kChunkAlignment = 4;
|
|
static constexpr uint32_t kChunkShift = 2;
|
|
static constexpr uint32_t kChunkMask = 0x3;
|
|
static constexpr uint32_t kLayoutMask = 0x70000000;
|
|
static constexpr uint32_t kLayoutShift = 28;
|
|
static constexpr uint32_t kAllChunksMask = 0x0FFFFFFF;
|
|
|
|
// This assumes that kChunkComplete == 3.
|
|
static constexpr uint32_t kAllChunksComplete = 0x0FFFFFFF;
|
|
static constexpr uint32_t kAllChunksFree = 0;
|
|
static constexpr size_t kInvalidPageIdx = static_cast<size_t>(-1);
|
|
|
|
// There is one page header per page, at the beginning of the page.
|
|
struct PageHeader {
|
|
// |layout| bits:
|
|
// [31] [30:28] [27:26] ... [1:0]
|
|
// | | | | |
|
|
// | | | | +---------- ChunkState[0]
|
|
// | | | +--------------- ChunkState[12..1]
|
|
// | | +--------------------- ChunkState[13]
|
|
// | +----------------------------- PageLayout (0 == page fully free)
|
|
// +------------------------------------ Reserved for future use
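// For illustration: a page partitioned as kPageDiv4 where chunk #0 is being
// written and chunks #1..#3 are still free has
// layout == (kPageDiv4 << kLayoutShift) | kChunkBeingWritten == 0x30000001.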
std::atomic<uint32_t> layout;
|
|
|
|
// If we're ever going to use this in the future, it might come in handy for
// reviving the kPageBeingPartitioned logic (look in the git log, it was there
// at some point in the past).
|
|
uint32_t reserved;
|
|
};
|
|
|
|
// There is one Chunk header per chunk (hence PageLayout per page) at the
|
|
// beginning of each chunk.
|
|
struct ChunkHeader {
|
|
enum Flags : uint8_t {
|
|
// If set, the first TracePacket in the chunk is partial and continues
|
|
// from |chunk_id| - 1 (within the same |writer_id|).
|
|
kFirstPacketContinuesFromPrevChunk = 1 << 0,
|
|
|
|
// If set, the last TracePacket in the chunk is partial and continues on
|
|
// |chunk_id| + 1 (within the same |writer_id|).
|
|
kLastPacketContinuesOnNextChunk = 1 << 1,
|
|
|
|
// If set, the last (fragmented) TracePacket in the chunk has holes (even
|
|
// if the chunk is marked as kChunkComplete) that need to be patched
|
|
// out-of-band before the chunk can be read.
|
|
kChunkNeedsPatching = 1 << 2,
|
|
};
|
|
|
|
struct Packets {
|
|
// Number of valid TracePacket protobuf messages contained in the chunk.
|
|
// Each TracePacket is prefixed by its own size. This field is
|
|
// monotonically updated by the Producer with release store semantic when
|
|
// the packet at position |count| is started. This last packet may not be
|
|
// considered complete until |count| is incremented for the subsequent
|
|
// packet or the chunk is completed.
|
|
uint16_t count : 10;
|
|
static constexpr size_t kMaxCount = (1 << 10) - 1;
|
|
|
|
// See Flags above.
|
|
uint16_t flags : 6;
|
|
};
|
|
|
|
// A monotonic counter of the chunk within the scope of a |writer_id|.
// The tuple (ProducerID, WriterID, ChunkID) allows one to figure out if two
// chunks are contiguous (and hence a trace packet spanning across them can
// be glued) or whether there were holes due to the ring buffer wrapping.
|
|
// This is set only when transitioning from kChunkFree to kChunkBeingWritten
|
|
// and remains unchanged throughout the remaining lifetime of the chunk.
|
|
std::atomic<uint32_t> chunk_id;
|
|
|
|
// ID of the writer, unique within the producer.
|
|
// Like |chunk_id|, this is set only when transitioning from kChunkFree to
|
|
// kChunkBeingWritten.
|
|
std::atomic<uint16_t> writer_id;
|
|
|
|
// There is no ProducerID here. The service figures that out from the IPC
|
|
// channel, which is unspoofable.
|
|
|
|
// Updated with release-store semantics.
|
|
std::atomic<Packets> packets;
|
|
};
|
|
|
|
class Chunk {
|
|
public:
|
|
Chunk(); // Constructs an invalid chunk.
|
|
|
|
// Chunk is move-only, to document the scope of the Acquire/Release
|
|
// TryLock operations below.
|
|
Chunk(const Chunk&) = delete;
|
|
Chunk operator=(const Chunk&) = delete;
|
|
Chunk(Chunk&&) noexcept;
|
|
Chunk& operator=(Chunk&&);
|
|
|
|
uint8_t* begin() const { return begin_; }
|
|
uint8_t* end() const { return begin_ + size_; }
|
|
|
|
// Size, including Chunk header.
|
|
size_t size() const { return size_; }
|
|
|
|
// Begin of the first packet (or packet fragment).
|
|
uint8_t* payload_begin() const { return begin_ + sizeof(ChunkHeader); }
|
|
size_t payload_size() const {
|
|
PERFETTO_DCHECK(size_ >= sizeof(ChunkHeader));
|
|
return size_ - sizeof(ChunkHeader);
|
|
}
|
|
|
|
bool is_valid() const { return begin_ && size_; }
|
|
|
|
// Index of the chunk within the page [0..13] (13 comes from kPageDiv14).
|
|
uint8_t chunk_idx() const { return chunk_idx_; }
|
|
|
|
ChunkHeader* header() { return reinterpret_cast<ChunkHeader*>(begin_); }
|
|
|
|
uint16_t writer_id() {
|
|
return header()->writer_id.load(std::memory_order_relaxed);
|
|
}
|
|
|
|
// Returns the count of packets and the flags with acquire-load semantics.
|
|
std::pair<uint16_t, uint8_t> GetPacketCountAndFlags() {
|
|
auto packets = header()->packets.load(std::memory_order_acquire);
|
|
const uint16_t packets_count = packets.count;
|
|
const uint8_t packets_flags = packets.flags;
|
|
return std::make_pair(packets_count, packets_flags);
|
|
}
|
|
|
|
// Increases |packets.count| with release semantics (note, however, that the
|
|
// packet count is incremented *before* starting writing a packet). Returns
|
|
// the new packet count. The increment is atomic but NOT race-free (i.e. no
|
|
// CAS). Only the Producer is supposed to perform this increment, and it's
|
|
// supposed to do that in a thread-safe way (holding a lock). A Chunk cannot
|
|
// be shared by multiple Producer threads without locking. The packet count
|
|
// is cleared by TryAcquireChunk(), when passing the new header for the
|
|
// chunk.
|
|
uint16_t IncrementPacketCount() {
|
|
ChunkHeader* chunk_header = header();
|
|
auto packets = chunk_header->packets.load(std::memory_order_relaxed);
|
|
packets.count++;
|
|
chunk_header->packets.store(packets, std::memory_order_release);
|
|
return packets.count;
|
|
}
|
|
|
|
// Increases |packets.count| to the given |packet_count|, but only if
|
|
// |packet_count| is larger than the current value of |packets.count|.
|
|
// Returns the new packet count. Same atomicity guarantees as
|
|
// IncrementPacketCount().
|
|
uint16_t IncreasePacketCountTo(uint16_t packet_count) {
|
|
ChunkHeader* chunk_header = header();
|
|
auto packets = chunk_header->packets.load(std::memory_order_relaxed);
|
|
if (packets.count < packet_count)
|
|
packets.count = packet_count;
|
|
chunk_header->packets.store(packets, std::memory_order_release);
|
|
return packets.count;
|
|
}
|
|
|
|
// Flags are cleared by TryAcquireChunk(), by passing the new header for
|
|
// the chunk.
|
|
void SetFlag(ChunkHeader::Flags flag) {
|
|
ChunkHeader* chunk_header = header();
|
|
auto packets = chunk_header->packets.load(std::memory_order_relaxed);
|
|
packets.flags |= flag;
|
|
chunk_header->packets.store(packets, std::memory_order_release);
|
|
}
|
|
|
|
private:
|
|
friend class SharedMemoryABI;
|
|
Chunk(uint8_t* begin, uint16_t size, uint8_t chunk_idx);
|
|
|
|
// Don't add extra fields, keep the move operator fast.
|
|
uint8_t* begin_ = nullptr;
|
|
uint16_t size_ = 0;
|
|
uint8_t chunk_idx_ = 0;
|
|
};
|
|
|
|
// Construct an instance from an existing shared memory buffer.
|
|
SharedMemoryABI(uint8_t* start, size_t size, size_t page_size);
|
|
SharedMemoryABI();
|
|
|
|
void Initialize(uint8_t* start, size_t size, size_t page_size);
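// Illustrative usage sketch (Producer side; |smb_start| and |smb_size| are
// hypothetical names for an already-mapped SMB region, error handling
// omitted for brevity):
//   SharedMemoryABI abi(smb_start, smb_size, 4096);
//   if (abi.TryPartitionPage(0, SharedMemoryABI::kPageDiv4)) {
//     SharedMemoryABI::ChunkHeader header{};
//     SharedMemoryABI::Chunk chunk = abi.TryAcquireChunkForWriting(0, 0, &header);
//     if (chunk.is_valid()) {
//       // ... write size-prefixed TracePackets into chunk.payload_begin() ...
//       abi.ReleaseChunkAsComplete(std::move(chunk));
//     }
//   }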
|
|
|
|
uint8_t* start() const { return start_; }
|
|
uint8_t* end() const { return start_ + size_; }
|
|
size_t size() const { return size_; }
|
|
size_t page_size() const { return page_size_; }
|
|
size_t num_pages() const { return num_pages_; }
|
|
bool is_valid() { return num_pages() > 0; }
|
|
|
|
uint8_t* page_start(size_t page_idx) {
|
|
PERFETTO_DCHECK(page_idx < num_pages_);
|
|
return start_ + page_size_ * page_idx;
|
|
}
|
|
|
|
PageHeader* page_header(size_t page_idx) {
|
|
return reinterpret_cast<PageHeader*>(page_start(page_idx));
|
|
}
|
|
|
|
// Returns true if the page is fully clear and has not been partitioned yet.
|
|
// The state of the page can change at any point after this returns (or even
|
|
// before). The Producer should use this only as a hint to decide whether
|
|
// it should TryPartitionPage() or acquire an individual chunk.
|
|
bool is_page_free(size_t page_idx) {
|
|
return page_header(page_idx)->layout.load(std::memory_order_relaxed) == 0;
|
|
}
|
|
|
|
// Returns true if all chunks in the page are kChunkComplete. As above, this
|
|
// is advisory only. The Service is supposed to use this only to decide
|
|
// whether to TryAcquireAllChunksForReading() or not.
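// For illustration: with a kPageDiv4 layout (4 chunks, 2 state bits each) the
// page is complete when the low 8 layout bits are all ones, i.e.
// (layout & kAllChunksMask) == 0xFF.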
bool is_page_complete(size_t page_idx) {
|
|
auto layout = page_header(page_idx)->layout.load(std::memory_order_relaxed);
|
|
const uint32_t num_chunks = GetNumChunksForLayout(layout);
|
|
if (num_chunks == 0)
|
|
return false; // Non partitioned pages cannot be complete.
|
|
return (layout & kAllChunksMask) ==
|
|
(kAllChunksComplete & ((1 << (num_chunks * kChunkShift)) - 1));
|
|
}
|
|
|
|
// For testing / debugging only.
|
|
std::string page_header_dbg(size_t page_idx) {
|
|
uint32_t x = page_header(page_idx)->layout.load(std::memory_order_relaxed);
|
|
return std::bitset<32>(x).to_string();
|
|
}
|
|
|
|
// Returns the page layout, which is a bitmap that specifies the chunking
|
|
// layout of the page and each chunk's current state. Reads with an
|
|
// acquire-load semantic to ensure a producer's writes corresponding to an
|
|
// update of the layout (e.g. clearing a chunk's header) are observed
|
|
// consistently.
|
|
uint32_t GetPageLayout(size_t page_idx) {
|
|
return page_header(page_idx)->layout.load(std::memory_order_acquire);
|
|
}
|
|
|
|
// Returns a bitmap in which each bit is set if the corresponding Chunk exists
|
|
// in the page (according to the page layout) and is free. If the page is not
|
|
// partitioned it returns 0 (as if the page had no free chunks).
|
|
uint32_t GetFreeChunks(size_t page_idx);
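// For illustration: for a kPageDiv4 page where only chunk #1 is currently
// being written, this returns 0b1101 (chunks #0, #2 and #3 are free).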
|
|
|
|
// Tries to atomically partition a page with the given |layout|. Returns true
|
|
// if the page was free and has been partitioned with the given |layout|,
|
|
// false if the page wasn't free anymore by the time we got there.
|
|
// If it succeeds, all the chunks are atomically set to the kChunkFree state.
|
|
bool TryPartitionPage(size_t page_idx, PageLayout layout);
|
|
|
|
// Tries to atomically mark a single chunk within the page as
|
|
// kChunkBeingWritten. Returns an invalid chunk if the page is not partitioned
|
|
// or the chunk is not in the kChunkFree state. If it succeeds, it sets the
// chunk header to |header|.
|
|
Chunk TryAcquireChunkForWriting(size_t page_idx,
|
|
size_t chunk_idx,
|
|
const ChunkHeader* header) {
|
|
return TryAcquireChunk(page_idx, chunk_idx, kChunkBeingWritten, header);
|
|
}
|
|
|
|
// Similar to TryAcquireChunkForWriting. Fails if the chunk isn't in the
|
|
// kChunkComplete state.
|
|
Chunk TryAcquireChunkForReading(size_t page_idx, size_t chunk_idx) {
|
|
return TryAcquireChunk(page_idx, chunk_idx, kChunkBeingRead, nullptr);
|
|
}
|
|
|
|
// The caller must have successfully TryAcquireAllChunksForReading().
|
|
Chunk GetChunkUnchecked(size_t page_idx,
|
|
uint32_t page_layout,
|
|
size_t chunk_idx);
|
|
|
|
// Puts a chunk into the kChunkComplete state. Returns the page index.
|
|
size_t ReleaseChunkAsComplete(Chunk chunk) {
|
|
return ReleaseChunk(std::move(chunk), kChunkComplete);
|
|
}
|
|
|
|
// Puts a chunk into the kChunkFree state. Returns the page index.
|
|
size_t ReleaseChunkAsFree(Chunk chunk) {
|
|
return ReleaseChunk(std::move(chunk), kChunkFree);
|
|
}
|
|
|
|
ChunkState GetChunkState(size_t page_idx, size_t chunk_idx) {
|
|
PageHeader* phdr = page_header(page_idx);
|
|
uint32_t layout = phdr->layout.load(std::memory_order_relaxed);
|
|
return GetChunkStateFromLayout(layout, chunk_idx);
|
|
}
|
|
|
|
std::pair<size_t, size_t> GetPageAndChunkIndex(const Chunk& chunk);
|
|
|
|
uint16_t GetChunkSizeForLayout(uint32_t page_layout) const {
|
|
return chunk_sizes_[(page_layout & kLayoutMask) >> kLayoutShift];
|
|
}
|
|
|
|
static ChunkState GetChunkStateFromLayout(uint32_t page_layout,
|
|
size_t chunk_idx) {
|
|
return static_cast<ChunkState>((page_layout >> (chunk_idx * kChunkShift)) &
|
|
kChunkMask);
|
|
}
|
|
|
|
static constexpr uint32_t GetNumChunksForLayout(uint32_t page_layout) {
|
|
return kNumChunksForLayout[(page_layout & kLayoutMask) >> kLayoutShift];
|
|
}
|
|
|
|
// Returns a bitmap in which each bit is set if the corresponding Chunk exists
|
|
// in the page (according to the page layout) and is not free. If the page is
|
|
// not partitioned it returns 0 (as if the page had no used chunks). Bit N
|
|
// corresponds to Chunk N.
|
|
static uint32_t GetUsedChunks(uint32_t page_layout) {
|
|
const uint32_t num_chunks = GetNumChunksForLayout(page_layout);
|
|
uint32_t res = 0;
|
|
for (uint32_t i = 0; i < num_chunks; i++) {
|
|
res |= ((page_layout & kChunkMask) != kChunkFree) ? (1 << i) : 0;
|
|
page_layout >>= kChunkShift;
|
|
}
|
|
return res;
|
|
}
|
|
|
|
private:
|
|
SharedMemoryABI(const SharedMemoryABI&) = delete;
|
|
SharedMemoryABI& operator=(const SharedMemoryABI&) = delete;
|
|
|
|
Chunk TryAcquireChunk(size_t page_idx,
|
|
size_t chunk_idx,
|
|
ChunkState,
|
|
const ChunkHeader*);
|
|
size_t ReleaseChunk(Chunk chunk, ChunkState);
|
|
|
|
uint8_t* start_ = nullptr;
|
|
size_t size_ = 0;
|
|
size_t page_size_ = 0;
|
|
size_t num_pages_ = 0;
|
|
std::array<uint16_t, kNumPageLayouts> chunk_sizes_;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ABI_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the
|
|
* License. You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing,
|
|
* software distributed under the License is distributed on an "AS
|
|
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
|
* express or implied. See the License for the specific language
|
|
* governing permissions and limitations under the License.
|
|
*/
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <sys/mman.h>
|
|
#endif
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
|
|
constexpr int kRetryAttempts = 64;
|
|
|
|
inline void WaitBeforeNextAttempt(int attempt) {
|
|
if (attempt < kRetryAttempts / 2) {
|
|
std::this_thread::yield();
|
|
} else {
|
|
base::SleepMicroseconds((unsigned(attempt) / 10) * 1000);
|
|
}
|
|
}
|
|
|
|
// Returns the largest 4-byte aligned chunk size <= |page_size| / |divider|
|
|
// for each divider in PageLayout.
|
|
constexpr size_t GetChunkSize(size_t page_size, size_t divider) {
|
|
return ((page_size - sizeof(SharedMemoryABI::PageHeader)) / divider) & ~3UL;
|
|
}
|
|
|
|
// Initializer for the const |chunk_sizes_| array.
|
|
std::array<uint16_t, SharedMemoryABI::kNumPageLayouts> InitChunkSizes(
|
|
size_t page_size) {
|
|
static_assert(SharedMemoryABI::kNumPageLayouts ==
|
|
base::ArraySize(SharedMemoryABI::kNumChunksForLayout),
|
|
"kNumPageLayouts out of date");
|
|
std::array<uint16_t, SharedMemoryABI::kNumPageLayouts> res = {};
|
|
for (size_t i = 0; i < SharedMemoryABI::kNumPageLayouts; i++) {
|
|
size_t num_chunks = SharedMemoryABI::kNumChunksForLayout[i];
|
|
size_t size = num_chunks == 0 ? 0 : GetChunkSize(page_size, num_chunks);
|
|
PERFETTO_CHECK(size <= std::numeric_limits<uint16_t>::max());
|
|
res[i] = static_cast<uint16_t>(size);
|
|
}
|
|
return res;
|
|
}
|
|
|
|
inline void ClearChunkHeader(SharedMemoryABI::ChunkHeader* header) {
|
|
header->writer_id.store(0u, std::memory_order_relaxed);
|
|
header->chunk_id.store(0u, std::memory_order_relaxed);
|
|
header->packets.store({}, std::memory_order_release);
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// static
|
|
constexpr uint32_t SharedMemoryABI::kNumChunksForLayout[];
|
|
constexpr const char* SharedMemoryABI::kChunkStateStr[];
|
|
constexpr const size_t SharedMemoryABI::kInvalidPageIdx;
|
|
constexpr const size_t SharedMemoryABI::kMinPageSize;
|
|
constexpr const size_t SharedMemoryABI::kMaxPageSize;
|
|
constexpr const size_t SharedMemoryABI::kPacketSizeDropPacket;
|
|
|
|
SharedMemoryABI::SharedMemoryABI() = default;
|
|
|
|
SharedMemoryABI::SharedMemoryABI(uint8_t* start,
|
|
size_t size,
|
|
size_t page_size) {
|
|
Initialize(start, size, page_size);
|
|
}
|
|
|
|
void SharedMemoryABI::Initialize(uint8_t* start,
|
|
size_t size,
|
|
size_t page_size) {
|
|
start_ = start;
|
|
size_ = size;
|
|
page_size_ = page_size;
|
|
num_pages_ = size / page_size;
|
|
chunk_sizes_ = InitChunkSizes(page_size);
|
|
static_assert(sizeof(PageHeader) == 8, "PageHeader size");
|
|
static_assert(sizeof(ChunkHeader) == 8, "ChunkHeader size");
|
|
static_assert(sizeof(ChunkHeader::chunk_id) == sizeof(ChunkID),
|
|
"ChunkID size");
|
|
|
|
static_assert(sizeof(ChunkHeader::Packets) == 2, "ChunkHeader::Packets size");
|
|
static_assert(alignof(ChunkHeader) == kChunkAlignment,
|
|
"ChunkHeader alignment");
|
|
|
|
// In theory std::atomic does not guarantee that the underlying type
|
|
// consists only of the actual atomic word. Theoretically it could have
|
|
// locks or other state. In practice most implementations just implement
|
|
// them without extra state. The code below overlays the atomic into the
|
|
// SMB, hence relies on this implementation detail. This should be fine
|
|
// pragmatically (Chrome's base makes the same assumption), but let's have a
|
|
// check for this.
|
|
static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t) &&
|
|
sizeof(std::atomic<uint16_t>) == sizeof(uint16_t),
|
|
"Incompatible STL <atomic> implementation");
|
|
|
|
// Check that the kAllChunks(Complete,Free) are consistent with the
|
|
// ChunkState enum values.
|
|
|
|
// These must be zero because we rely on zero-initialized memory being
|
|
// interpreted as "free".
|
|
static_assert(kChunkFree == 0 && kAllChunksFree == 0,
              "kChunkFree/kAllChunksFree must be 0");
|
|
|
|
static_assert((kAllChunksComplete & kChunkMask) == kChunkComplete,
|
|
"kAllChunksComplete out of sync with kChunkComplete");
|
|
|
|
// Check the consistency of the kMax... constants.
|
|
static_assert(sizeof(ChunkHeader::writer_id) == sizeof(WriterID),
|
|
"WriterID size");
|
|
ChunkHeader chunk_header{};
|
|
chunk_header.chunk_id.store(static_cast<uint32_t>(-1));
|
|
PERFETTO_CHECK(chunk_header.chunk_id.load() == kMaxChunkID);
|
|
|
|
chunk_header.writer_id.store(static_cast<uint16_t>(-1));
|
|
PERFETTO_CHECK(kMaxWriterID <= chunk_header.writer_id.load());
|
|
|
|
PERFETTO_CHECK(page_size >= kMinPageSize);
|
|
PERFETTO_CHECK(page_size <= kMaxPageSize);
|
|
PERFETTO_CHECK(page_size % kMinPageSize == 0);
|
|
PERFETTO_CHECK(reinterpret_cast<uintptr_t>(start) % kMinPageSize == 0);
|
|
PERFETTO_CHECK(size % page_size == 0);
|
|
}
|
|
|
|
SharedMemoryABI::Chunk SharedMemoryABI::GetChunkUnchecked(size_t page_idx,
|
|
uint32_t page_layout,
|
|
size_t chunk_idx) {
|
|
const size_t num_chunks = GetNumChunksForLayout(page_layout);
|
|
PERFETTO_DCHECK(chunk_idx < num_chunks);
|
|
// Compute the chunk virtual address and write it into |chunk|.
|
|
const uint16_t chunk_size = GetChunkSizeForLayout(page_layout);
|
|
size_t chunk_offset_in_page = sizeof(PageHeader) + chunk_idx * chunk_size;
|
|
|
|
Chunk chunk(page_start(page_idx) + chunk_offset_in_page, chunk_size,
|
|
static_cast<uint8_t>(chunk_idx));
|
|
PERFETTO_DCHECK(chunk.end() <= end());
|
|
return chunk;
|
|
}
|
|
|
|
SharedMemoryABI::Chunk SharedMemoryABI::TryAcquireChunk(
|
|
size_t page_idx,
|
|
size_t chunk_idx,
|
|
ChunkState desired_chunk_state,
|
|
const ChunkHeader* header) {
|
|
PERFETTO_DCHECK(desired_chunk_state == kChunkBeingRead ||
|
|
desired_chunk_state == kChunkBeingWritten);
|
|
PageHeader* phdr = page_header(page_idx);
|
|
for (int attempt = 0; attempt < kRetryAttempts; attempt++) {
|
|
uint32_t layout = phdr->layout.load(std::memory_order_acquire);
|
|
const size_t num_chunks = GetNumChunksForLayout(layout);
|
|
|
|
// The page layout has changed (or the page is free).
|
|
if (chunk_idx >= num_chunks)
|
|
return Chunk();
|
|
|
|
// Verify that the chunk is still in a state that allows the transition to
|
|
// |desired_chunk_state|. The only allowed transitions are:
|
|
// 1. kChunkFree -> kChunkBeingWritten (Producer).
|
|
// 2. kChunkComplete -> kChunkBeingRead (Service).
|
|
ChunkState expected_chunk_state =
|
|
desired_chunk_state == kChunkBeingWritten ? kChunkFree : kChunkComplete;
|
|
auto cur_chunk_state = (layout >> (chunk_idx * kChunkShift)) & kChunkMask;
|
|
if (cur_chunk_state != expected_chunk_state)
|
|
return Chunk();
|
|
|
|
uint32_t next_layout = layout;
|
|
next_layout &= ~(kChunkMask << (chunk_idx * kChunkShift));
|
|
next_layout |= (desired_chunk_state << (chunk_idx * kChunkShift));
|
|
if (phdr->layout.compare_exchange_strong(layout, next_layout,
|
|
std::memory_order_acq_rel)) {
|
|
// Compute the chunk virtual address and write it into |chunk|.
|
|
Chunk chunk = GetChunkUnchecked(page_idx, layout, chunk_idx);
|
|
if (desired_chunk_state == kChunkBeingWritten) {
|
|
PERFETTO_DCHECK(header);
|
|
ChunkHeader* new_header = chunk.header();
|
|
new_header->writer_id.store(header->writer_id,
|
|
std::memory_order_relaxed);
|
|
new_header->chunk_id.store(header->chunk_id, std::memory_order_relaxed);
|
|
new_header->packets.store(header->packets, std::memory_order_release);
|
|
}
|
|
return chunk;
|
|
}
|
|
WaitBeforeNextAttempt(attempt);
|
|
}
|
|
return Chunk(); // All our attempts failed.
|
|
}
|
|
|
|
bool SharedMemoryABI::TryPartitionPage(size_t page_idx, PageLayout layout) {
|
|
PERFETTO_DCHECK(layout >= kPageDiv1 && layout <= kPageDiv14);
|
|
uint32_t expected_layout = 0; // Free page.
|
|
uint32_t next_layout = (layout << kLayoutShift) & kLayoutMask;
|
|
PageHeader* phdr = page_header(page_idx);
|
|
if (!phdr->layout.compare_exchange_strong(expected_layout, next_layout,
|
|
std::memory_order_acq_rel)) {
|
|
return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
uint32_t SharedMemoryABI::GetFreeChunks(size_t page_idx) {
|
|
uint32_t layout =
|
|
page_header(page_idx)->layout.load(std::memory_order_relaxed);
|
|
const uint32_t num_chunks = GetNumChunksForLayout(layout);
|
|
uint32_t res = 0;
|
|
for (uint32_t i = 0; i < num_chunks; i++) {
|
|
res |= ((layout & kChunkMask) == kChunkFree) ? (1 << i) : 0;
|
|
layout >>= kChunkShift;
|
|
}
|
|
return res;
|
|
}
|
|
|
|
size_t SharedMemoryABI::ReleaseChunk(Chunk chunk,
|
|
ChunkState desired_chunk_state) {
|
|
PERFETTO_DCHECK(desired_chunk_state == kChunkComplete ||
|
|
desired_chunk_state == kChunkFree);
|
|
|
|
size_t page_idx;
|
|
size_t chunk_idx;
|
|
std::tie(page_idx, chunk_idx) = GetPageAndChunkIndex(chunk);
|
|
|
|
// Reset header fields, so that the service can identify when the chunk's
|
|
// header has been initialized by the producer.
|
|
if (desired_chunk_state == kChunkFree)
|
|
ClearChunkHeader(chunk.header());
|
|
|
|
for (int attempt = 0; attempt < kRetryAttempts; attempt++) {
|
|
PageHeader* phdr = page_header(page_idx);
|
|
uint32_t layout = phdr->layout.load(std::memory_order_relaxed);
|
|
const size_t page_chunk_size = GetChunkSizeForLayout(layout);
|
|
|
|
// TODO(primiano): this should not be a CHECK, because a malicious producer
|
|
// could crash us by putting the chunk in an invalid state. This should
|
|
// gracefully fail. Keep a CHECK until then.
|
|
PERFETTO_CHECK(chunk.size() == page_chunk_size);
|
|
const uint32_t chunk_state =
|
|
((layout >> (chunk_idx * kChunkShift)) & kChunkMask);
|
|
|
|
// Verify that the chunk is still in a state that allows the transition to
|
|
// |desired_chunk_state|. The only allowed transitions are:
|
|
// 1. kChunkBeingWritten -> kChunkComplete (Producer).
|
|
// 2. kChunkBeingRead -> kChunkFree (Service).
|
|
ChunkState expected_chunk_state;
|
|
if (desired_chunk_state == kChunkComplete) {
|
|
expected_chunk_state = kChunkBeingWritten;
|
|
} else {
|
|
expected_chunk_state = kChunkBeingRead;
|
|
}
|
|
|
|
// TODO(primiano): should not be a CHECK (same rationale of comment above).
|
|
PERFETTO_CHECK(chunk_state == expected_chunk_state);
|
|
uint32_t next_layout = layout;
|
|
next_layout &= ~(kChunkMask << (chunk_idx * kChunkShift));
|
|
next_layout |= (desired_chunk_state << (chunk_idx * kChunkShift));
|
|
|
|
// If we are freeing a chunk and all the other chunks in the page are free
|
|
// we should de-partition the page and mark it as clear.
|
|
if ((next_layout & kAllChunksMask) == kAllChunksFree)
|
|
next_layout = 0;
|
|
|
|
if (phdr->layout.compare_exchange_strong(layout, next_layout,
|
|
std::memory_order_acq_rel)) {
|
|
return page_idx;
|
|
}
|
|
WaitBeforeNextAttempt(attempt);
|
|
}
|
|
// Too much contention on this page. Give up. This page will be left pending
|
|
// forever but there isn't much more we can do at this point.
|
|
PERFETTO_DFATAL("Too much contention on page.");
|
|
return kInvalidPageIdx;
|
|
}
|
|
|
|
SharedMemoryABI::Chunk::Chunk() = default;
|
|
|
|
SharedMemoryABI::Chunk::Chunk(uint8_t* begin, uint16_t size, uint8_t chunk_idx)
|
|
: begin_(begin), size_(size), chunk_idx_(chunk_idx) {
|
|
PERFETTO_CHECK(reinterpret_cast<uintptr_t>(begin) % kChunkAlignment == 0);
|
|
PERFETTO_CHECK(size > 0);
|
|
}
|
|
|
|
SharedMemoryABI::Chunk::Chunk(Chunk&& o) noexcept {
|
|
*this = std::move(o);
|
|
}
|
|
|
|
SharedMemoryABI::Chunk& SharedMemoryABI::Chunk::operator=(Chunk&& o) {
|
|
begin_ = o.begin_;
|
|
size_ = o.size_;
|
|
chunk_idx_ = o.chunk_idx_;
|
|
o.begin_ = nullptr;
|
|
o.size_ = 0;
|
|
o.chunk_idx_ = 0;
|
|
return *this;
|
|
}
|
|
|
|
std::pair<size_t, size_t> SharedMemoryABI::GetPageAndChunkIndex(
|
|
const Chunk& chunk) {
|
|
PERFETTO_DCHECK(chunk.is_valid());
|
|
PERFETTO_DCHECK(chunk.begin() >= start_);
|
|
PERFETTO_DCHECK(chunk.end() <= start_ + size_);
|
|
|
|
// TODO(primiano): The divisions below could be avoided if we cached
|
|
// |page_shift_|.
|
|
const uintptr_t rel_addr = static_cast<uintptr_t>(chunk.begin() - start_);
|
|
const size_t page_idx = rel_addr / page_size_;
|
|
const size_t offset = rel_addr % page_size_;
|
|
PERFETTO_DCHECK(offset >= sizeof(PageHeader));
|
|
PERFETTO_DCHECK(offset % kChunkAlignment == 0);
|
|
PERFETTO_DCHECK((offset - sizeof(PageHeader)) % chunk.size() == 0);
|
|
const size_t chunk_idx = (offset - sizeof(PageHeader)) / chunk.size();
|
|
PERFETTO_DCHECK(chunk_idx < kMaxChunksPerPage);
|
|
PERFETTO_DCHECK(chunk_idx < GetNumChunksForLayout(GetPageLayout(page_idx)));
|
|
return std::make_pair(page_idx, chunk_idx);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/shared_memory_arbiter_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/core/shared_memory_arbiter_impl.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/weak_ptr.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_WEAK_PTR_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_WEAK_PTR_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
|
|
#include <memory>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A simple WeakPtr for single-threaded cases.
|
|
// Generally keep the WeakPtrFactory as last fields in classes: it makes the
|
|
// WeakPtr(s) invalidate as first thing in the class dtor.
|
|
// Usage:
|
|
// class MyClass {
|
|
// MyClass() : weak_factory_(this) {}
|
|
// WeakPtr<MyClass> GetWeakPtr() { return weak_factory_.GetWeakPtr(); }
|
|
//
|
|
// private:
|
|
// WeakPtrFactory<MyClass> weak_factory_;
|
|
// }
|
|
//
|
|
// int main() {
|
|
// std::unique_ptr<MyClass> foo(new MyClass);
|
|
// auto wptr = foo->GetWeakPtr();
// ASSERT_TRUE(wptr);
// ASSERT_EQ(foo.get(), wptr.get());
// foo.reset();
// ASSERT_FALSE(wptr);
// ASSERT_EQ(nullptr, wptr.get());
|
|
// }
|
|
|
|
template <typename T>
|
|
class WeakPtrFactory; // Forward declaration, defined below.
|
|
|
|
template <typename T>
|
|
class WeakPtr {
|
|
public:
|
|
WeakPtr() {}
|
|
WeakPtr(const WeakPtr&) = default;
|
|
WeakPtr& operator=(const WeakPtr&) = default;
|
|
WeakPtr(WeakPtr&&) = default;
|
|
WeakPtr& operator=(WeakPtr&&) = default;
|
|
|
|
T* get() const {
|
|
PERFETTO_DCHECK_THREAD(thread_checker);
|
|
return handle_ ? *handle_.get() : nullptr;
|
|
}
|
|
T* operator->() const { return get(); }
|
|
T& operator*() const { return *get(); }
|
|
|
|
explicit operator bool() const { return !!get(); }
|
|
|
|
private:
|
|
friend class WeakPtrFactory<T>;
|
|
explicit WeakPtr(const std::shared_ptr<T*>& handle) : handle_(handle) {}
|
|
|
|
std::shared_ptr<T*> handle_;
|
|
PERFETTO_THREAD_CHECKER(thread_checker)
|
|
};
|
|
|
|
template <typename T>
|
|
class WeakPtrFactory {
|
|
public:
|
|
explicit WeakPtrFactory(T* owner)
|
|
: weak_ptr_(std::shared_ptr<T*>(new T* {owner})) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker);
|
|
}
|
|
|
|
~WeakPtrFactory() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker);
|
|
*(weak_ptr_.handle_.get()) = nullptr;
|
|
}
|
|
|
|
// Can be safely called on any thread, since it simply copies |weak_ptr_|.
|
|
// Note that any accesses to the returned pointer need to be made on the
|
|
// thread that created/reset the factory.
|
|
WeakPtr<T> GetWeakPtr() const { return weak_ptr_; }
|
|
|
|
// Reset the factory to a new owner & thread. May only be called before any
|
|
// weak pointers were passed out. Future weak pointers will be valid on the
|
|
// calling thread.
|
|
void Reset(T* owner) {
|
|
// Reset thread checker to current thread.
|
|
PERFETTO_DETACH_FROM_THREAD(thread_checker);
|
|
PERFETTO_DCHECK_THREAD(thread_checker);
|
|
|
|
// We should not have passed out any weak pointers yet at this point.
|
|
PERFETTO_DCHECK(weak_ptr_.handle_.use_count() == 1);
|
|
|
|
weak_ptr_ = WeakPtr<T>(std::shared_ptr<T*>(new T* {owner}));
|
|
}
|
|
|
|
private:
|
|
WeakPtrFactory(const WeakPtrFactory&) = delete;
|
|
WeakPtrFactory& operator=(const WeakPtrFactory&) = delete;
|
|
|
|
WeakPtr<T> weak_ptr_;
|
|
PERFETTO_THREAD_CHECKER(thread_checker)
|
|
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_WEAK_PTR_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/shared_memory_arbiter.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/tracing_service.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/shared_memory.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_H_
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// An abstract interface that models the shared memory region shared between
|
|
// Service and Producer. The concrete implementation of this is up to the
|
|
// transport layer. This can be as simple as a malloc()-ed buffer, if both
|
|
// Producer and Service are hosted in the same process, or some posix shared
|
|
// memory for the out-of-process case (see src/unix_rpc).
|
|
// Both this class and the Factory are subclassed by the transport layer, which
|
|
// will attach platform specific fields to it (e.g., a unix file descriptor).
|
|
class PERFETTO_EXPORT SharedMemory {
|
|
public:
|
|
class PERFETTO_EXPORT Factory {
|
|
public:
|
|
virtual ~Factory();
|
|
virtual std::unique_ptr<SharedMemory> CreateSharedMemory(size_t) = 0;
|
|
};
|
|
|
|
// The transport layer is expected to tear down the resource associated with
// this region when the object is destroyed.
|
|
virtual ~SharedMemory();
|
|
|
|
virtual void* start() const = 0;
|
|
virtual size_t size() const = 0;
|
|
virtual int fd() const = 0;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACING_SERVICE_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACING_SERVICE_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <functional>
|
|
#include <memory>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
class Consumer;
|
|
class Producer;
|
|
class SharedMemoryArbiter;
|
|
class TraceWriter;
|
|
|
|
// TODO: for the moment this assumes that all the calls happen on the same
|
|
// thread/sequence. Not sure this will be the case long term in Chrome.
|
|
|
|
// The API for the Producer port of the Service.
|
|
// Subclassed by:
|
|
// 1. The tracing_service_impl.cc business logic when returning it in response
|
|
// to the ConnectProducer() method.
|
|
// 2. The transport layer (e.g., src/ipc) when the producer and
|
|
// the service don't talk locally but via some IPC mechanism.
|
|
class PERFETTO_EXPORT ProducerEndpoint {
|
|
public:
|
|
virtual ~ProducerEndpoint();
|
|
|
|
// Called by the Producer to (un)register data sources. Data sources are
|
|
// identified by their name (i.e. DataSourceDescriptor.name)
|
|
virtual void RegisterDataSource(const DataSourceDescriptor&) = 0;
|
|
virtual void UnregisterDataSource(const std::string& name) = 0;
|
|
|
|
// Associate the trace writer with the given |writer_id| with
|
|
// |target_buffer|. The service may use this information to retrieve and
|
|
// copy uncommitted chunks written by the trace writer into its associated
|
|
// buffer, e.g. when a producer process crashes or when a flush is
|
|
// necessary.
|
|
virtual void RegisterTraceWriter(uint32_t writer_id,
|
|
uint32_t target_buffer) = 0;
|
|
|
|
// Remove the association of the trace writer previously created via
|
|
// RegisterTraceWriter.
|
|
virtual void UnregisterTraceWriter(uint32_t writer_id) = 0;
|
|
|
|
// Called by the Producer to signal that some pages in the shared memory
|
|
// buffer (shared between Service and Producer) have changed.
|
|
// When the Producer and the Service are hosted in the same process and
|
|
// hence potentially live on the same task runner, this method must call
|
|
// TracingServiceImpl's CommitData synchronously, without any PostTask()s,
|
|
// if on the same thread. This is to avoid a deadlock where the Producer
|
|
// exhausts its SMB and stalls waiting for the service to catch up with
|
|
// reads, but the Service never gets to that because it lives on the same
|
|
// thread.
|
|
using CommitDataCallback = std::function<void()>;
|
|
virtual void CommitData(const CommitDataRequest&,
|
|
CommitDataCallback callback = {}) = 0;
|
|
|
|
virtual SharedMemory* shared_memory() const = 0;
|
|
|
|
// Size of shared memory buffer pages. It's always a multiple of 4K.
|
|
// See shared_memory_abi.h
|
|
virtual size_t shared_buffer_page_size_kb() const = 0;
|
|
|
|
// Creates a trace writer, which allows one to create events, handling the
// underlying shared memory buffer and signalling to the Service. This method
|
|
// is thread-safe but the returned object is not. A TraceWriter should be
|
|
// used only from a single thread, or the caller has to handle sequencing
|
|
// via a mutex or equivalent. This method can only be called if
|
|
// TracingService::ConnectProducer was called with |in_process=true|.
|
|
// Args:
|
|
// |target_buffer| is the target buffer ID where the data produced by the
|
|
// writer should be stored by the tracing service. This value is passed
|
|
// upon creation of the data source (StartDataSource()) in the
|
|
// DataSourceConfig.target_buffer().
|
|
virtual std::unique_ptr<TraceWriter> CreateTraceWriter(
|
|
BufferID target_buffer,
|
|
BufferExhaustedPolicy buffer_exhausted_policy =
|
|
BufferExhaustedPolicy::kDefault) = 0;
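// Illustrative usage sketch (assuming an in-process endpoint and a
// |target_buffer| id taken from the DataSourceConfig):
//   std::unique_ptr<TraceWriter> writer =
//       endpoint->CreateTraceWriter(target_buffer);
//   {
//     auto packet = writer->NewTracePacket();
//     packet->set_timestamp(42);
//   }  // The returned handle finalizes the packet when it goes out of scope.
//   writer->Flush();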
|
|
|
|
// TODO(eseckler): Also expose CreateStartupTraceWriter() ?
|
|
|
|
// In some cases you can access the producer's SharedMemoryArbiter (for
|
|
// example if TracingService::ConnectProducer is called with
|
|
// |in_process=true|). The SharedMemoryArbiter can be used to create
|
|
// TraceWriters which are able to directly commit chunks. For the
|
|
// |in_process=true| case this can be done without going through an IPC layer.
|
|
virtual SharedMemoryArbiter* MaybeSharedMemoryArbiter() = 0;
|
|
|
|
// Whether the service accepted a shared memory buffer provided by the
|
|
// producer.
|
|
virtual bool IsShmemProvidedByProducer() const = 0;
|
|
|
|
// Called in response to a Producer::Flush(request_id) call after all data
|
|
// for the flush request has been committed.
|
|
virtual void NotifyFlushComplete(FlushRequestID) = 0;
|
|
|
|
// Called in response to one or more Producer::StartDataSource(),
|
|
// if the data source registered setting the flag
|
|
// DataSourceDescriptor.will_notify_on_start.
|
|
virtual void NotifyDataSourceStarted(DataSourceInstanceID) = 0;
|
|
|
|
// Called in response to one or more Producer::StopDataSource(),
|
|
// if the data source registered setting the flag
|
|
// DataSourceDescriptor.will_notify_on_stop.
|
|
virtual void NotifyDataSourceStopped(DataSourceInstanceID) = 0;
|
|
|
|
// This informs the service to activate any of these triggers if any tracing
|
|
// session was waiting for them.
|
|
virtual void ActivateTriggers(const std::vector<std::string>&) = 0;
|
|
|
|
// Emits a synchronization barrier to linearize with the service. When
|
|
// |callback| is invoked, the caller has the guarantee that the service has
|
|
// seen and processed all the requests sent by this producer prior to the
|
|
// Sync() call. Used mainly in tests.
|
|
virtual void Sync(std::function<void()> callback) = 0;
|
|
}; // class ProducerEndpoint.
|
|
|
|
// The API for the Consumer port of the Service.
|
|
// Subclassed by:
|
|
// 1. The tracing_service_impl.cc business logic when returning it in response
|
|
// to
|
|
// the ConnectConsumer() method.
|
|
// 2. The transport layer (e.g., src/ipc) when the consumer and
|
|
// the service don't talk locally but via some IPC mechanism.
|
|
class PERFETTO_EXPORT ConsumerEndpoint {
|
|
public:
|
|
virtual ~ConsumerEndpoint();
|
|
|
|
// Enables tracing with the given TraceConfig. The ScopedFile argument is
|
|
// used only when TraceConfig.write_into_file == true.
|
|
// If TraceConfig.deferred_start == true data sources are configured via
|
|
// SetupDataSource() but are not started until StartTracing() is called.
|
|
// This is to support pre-initialization and fast triggering of traces.
|
|
// The ScopedFile argument is used only when TraceConfig.write_into_file
|
|
// == true.
|
|
virtual void EnableTracing(const TraceConfig&,
|
|
base::ScopedFile = base::ScopedFile()) = 0;
|
|
|
|
// Update the trace config of an existing tracing session; only a subset
|
|
// of options can be changed mid-session. Currently the only
|
|
// supported functionality is expanding the list of producer_name_filters()
|
|
// (or removing the filter entirely) for existing data sources.
|
|
virtual void ChangeTraceConfig(const TraceConfig&) = 0;
|
|
|
|
// Starts all data sources configured in the trace config. This is used only
|
|
// after calling EnableTracing() with TraceConfig.deferred_start=true.
|
|
// It's a no-op if called after a regular EnableTracing(), without setting
|
|
// deferred_start.
|
|
virtual void StartTracing() = 0;
|
|
|
|
virtual void DisableTracing() = 0;
|
|
|
|
// Requests all data sources to flush their data immediately and invokes the
|
|
// passed callback once all of them have acked the flush (in which case
|
|
// the callback argument |success| will be true) or |timeout_ms| are elapsed
|
|
// (in which case |success| will be false).
|
|
// If |timeout_ms| is 0 the TraceConfig's flush_timeout_ms is used, or,
|
|
// if that one is not set (or is set to 0), kDefaultFlushTimeoutMs (5s) is
|
|
// used.
|
|
using FlushCallback = std::function<void(bool /*success*/)>;
|
|
virtual void Flush(uint32_t timeout_ms, FlushCallback) = 0;
|
|
|
|
// Tracing data will be delivered invoking Consumer::OnTraceData().
|
|
virtual void ReadBuffers() = 0;
|
|
|
|
virtual void FreeBuffers() = 0;
|
|
|
|
// Will call OnDetach().
|
|
virtual void Detach(const std::string& key) = 0;
|
|
|
|
// Will call OnAttach().
|
|
virtual void Attach(const std::string& key) = 0;
|
|
|
|
// Will call OnTraceStats().
|
|
virtual void GetTraceStats() = 0;
|
|
|
|
// Start or stop observing events of selected types. |events_mask| specifies
|
|
// the types of events to observe in a bitmask of ObservableEvents::Type.
|
|
// To disable observing, pass 0.
|
|
// Will call OnObservableEvents() repeatedly whenever an event of an enabled
|
|
// ObservableEventType occurs.
|
|
// TODO(eseckler): Extend this to support producers & data sources.
|
|
virtual void ObserveEvents(uint32_t events_mask) = 0;
|
|
|
|
// Used to obtain the list of connected data sources and other info about
|
|
// the tracing service.
|
|
using QueryServiceStateCallback =
|
|
std::function<void(bool success, const TracingServiceState&)>;
|
|
virtual void QueryServiceState(QueryServiceStateCallback) = 0;
|
|
|
|
// Used for feature detection. Makes sense only when the consumer and the
|
|
// service talk over IPC and can be from different versions.
|
|
using QueryCapabilitiesCallback =
|
|
std::function<void(const TracingServiceCapabilities&)>;
|
|
virtual void QueryCapabilities(QueryCapabilitiesCallback) = 0;
|
|
}; // class ConsumerEndpoint.
|
|
|
|
// The public API of the tracing Service business logic.
|
|
//
|
|
// Exposed to:
|
|
// 1. The transport layer (e.g., src/unix_rpc/unix_service_host.cc),
|
|
// which forwards commands received from a remote producer or consumer to
|
|
// the actual service implementation.
|
|
// 2. Tests.
|
|
//
|
|
// Subclassed by:
|
|
// The service business logic in src/core/tracing_service_impl.cc.
|
|
class PERFETTO_EXPORT TracingService {
|
|
public:
|
|
using ProducerEndpoint = perfetto::ProducerEndpoint;
|
|
using ConsumerEndpoint = perfetto::ConsumerEndpoint;
|
|
|
|
enum class ProducerSMBScrapingMode {
|
|
// Use service's default setting for SMB scraping. Currently, the default
|
|
// mode is to disable SMB scraping, but this may change in the future.
|
|
kDefault,
|
|
|
|
// Enable scraping of uncommitted chunks in producers' shared memory
|
|
// buffers.
|
|
kEnabled,
|
|
|
|
// Disable scraping of uncommitted chunks in producers' shared memory
|
|
// buffers.
|
|
kDisabled
|
|
};
|
|
|
|
// Implemented in src/core/tracing_service_impl.cc .
|
|
static std::unique_ptr<TracingService> CreateInstance(
|
|
std::unique_ptr<SharedMemory::Factory>,
|
|
base::TaskRunner*);
|
|
|
|
virtual ~TracingService();
|
|
|
|
// Connects a Producer instance and obtains a ProducerEndpoint, which is
|
|
// essentially a 1:1 channel between one Producer and the Service.
|
|
//
|
|
// The caller has to guarantee that the passed Producer will be alive as long
|
|
// as the returned ProducerEndpoint is alive. Both the passed Producer and the
|
|
// returned ProducerEndpoint must live on the same task runner of the service,
|
|
// specifically:
|
|
// 1) The Service will call Producer::* methods on the Service's task runner.
|
|
// 2) The Producer should call ProducerEndpoint::* methods only on the
|
|
// service's task runner, except for ProducerEndpoint::CreateTraceWriter(),
|
|
// which can be called on any thread. To disconnect just destroy the
|
|
// returned ProducerEndpoint object. It is safe to destroy the Producer
|
|
// once the Producer::OnDisconnect() has been invoked.
|
|
//
|
|
// |uid| is the trusted user id of the producer process, used by the consumers
|
|
// for validating the origin of trace data. |shared_memory_size_hint_bytes|
|
|
// and |shared_memory_page_size_hint_bytes| are optional hints on the size of
|
|
// the shared memory buffer and its pages. The service can ignore the hints
|
|
// (e.g., if the hints are unreasonably large or other sizes were configured
|
|
// in a tracing session's config). |in_process| enables the ProducerEndpoint
|
|
// to manage its own shared memory and enables use of
|
|
// |ProducerEndpoint::CreateTraceWriter|.
|
|
//
|
|
// The producer can optionally provide a non-null |shm|, which the service
|
|
// will adopt for the connection to the producer, provided it is correctly
|
|
// sized. In this case, |shared_memory_page_size_hint_bytes| indicates the
|
|
// page size used in this SMB. The producer can use this mechanism to record
|
|
// tracing data to an SMB even before the tracing session is started by the
|
|
// service. This is used in Chrome to implement startup tracing. If the buffer
|
|
// is incorrectly sized, the service will discard the SMB and allocate a new
|
|
// one, provided to the producer via ProducerEndpoint::shared_memory() after
|
|
// OnTracingSetup(). To verify that the service accepted the SMB, the producer
|
|
// may check via ProducerEndpoint::IsShmemProvidedByProducer(). If the service
|
|
// accepted the SMB, the producer can then commit any data that is already in
|
|
// the SMB after the tracing session was started by the service via
|
|
// Producer::StartDataSource(). The |shm| will also be rejected when
|
|
// connecting to a service that is too old (pre Android-11).
|
|
//
|
|
// Can return null in the unlikely event that the service has too many producers
|
|
// connected.
|
|
virtual std::unique_ptr<ProducerEndpoint> ConnectProducer(
|
|
Producer*,
|
|
uid_t uid,
|
|
const std::string& name,
|
|
size_t shared_memory_size_hint_bytes = 0,
|
|
bool in_process = false,
|
|
ProducerSMBScrapingMode smb_scraping_mode =
|
|
ProducerSMBScrapingMode::kDefault,
|
|
size_t shared_memory_page_size_hint_bytes = 0,
|
|
std::unique_ptr<SharedMemory> shm = nullptr) = 0;
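// Illustrative sketch: connecting an in-process producer. |svc| is a
// hypothetical TracingService instance and |my_producer| a hypothetical
// Producer implementation; error handling is omitted.
//
//   std::unique_ptr<TracingService::ProducerEndpoint> endpoint =
//       svc->ConnectProducer(&my_producer, /*uid=*/geteuid(), "my_producer",
//                            /*shared_memory_size_hint_bytes=*/0,
//                            /*in_process=*/true);
//   // Destroying |endpoint| disconnects; |my_producer| can be destroyed once
//   // Producer::OnDisconnect() has been invoked.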
|
|
|
|
// Connects a Consumer instance and obtains a ConsumerEndpoint, which is
|
|
// essentially a 1:1 channel between one Consumer and the Service.
|
|
// The caller has to guarantee that the passed Consumer will be alive as long
|
|
// as the returned ConsumerEndpoint is alive.
|
|
// To disconnect just destroy the returned ConsumerEndpoint object. It is safe
|
|
// to destroy the Consumer once the Consumer::OnDisconnect() has been invoked.
|
|
virtual std::unique_ptr<ConsumerEndpoint> ConnectConsumer(Consumer*,
|
|
uid_t) = 0;
|
|
|
|
// Enable/disable scraping of chunks in the shared memory buffer. If enabled,
|
|
// the service will copy uncommitted but non-empty chunks from the SMB when
|
|
// flushing (e.g. to handle unresponsive producers or producers unable to
|
|
// flush their active chunks), on producer disconnect (e.g. to recover data
|
|
// from crashed producers), and after disabling a tracing session (e.g. to
|
|
// gather data from producers that didn't stop their data sources in time).
|
|
//
|
|
// This feature is currently used by Chrome.
|
|
virtual void SetSMBScrapingEnabled(bool enabled) = 0;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACING_SERVICE_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ARBITER_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ARBITER_H_
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <functional>
|
|
#include <memory>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
}
|
|
|
|
class SharedMemory;
|
|
class TraceWriter;
|
|
|
|
// Used by the Producer-side of the transport layer to vend TraceWriters
|
|
// from the SharedMemory it receives from the Service-side.
|
|
class PERFETTO_EXPORT SharedMemoryArbiter {
|
|
public:
|
|
virtual ~SharedMemoryArbiter();
|
|
|
|
// Creates a new TraceWriter and assigns it a new WriterID. The WriterID is
|
|
// written in each chunk header owned by a given TraceWriter and is used by
|
|
// the Service to reconstruct TracePackets written by the same TraceWriter.
|
|
// Returns null impl of TraceWriter if all WriterID slots are exhausted. The
|
|
// writer will commit to the provided |target_buffer|. If the arbiter was
|
|
// created via CreateUnbound(), only BufferExhaustedPolicy::kDrop is
|
|
// supported.
|
|
virtual std::unique_ptr<TraceWriter> CreateTraceWriter(
|
|
BufferID target_buffer,
|
|
BufferExhaustedPolicy buffer_exhausted_policy =
|
|
BufferExhaustedPolicy::kDefault) = 0;
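// Illustrative sketch: vending a writer for buffer ID 1 and emitting one
// packet (|arbiter| is a hypothetical, already-bound SharedMemoryArbiter;
// the timestamp value is arbitrary):
//
//   std::unique_ptr<TraceWriter> writer =
//       arbiter->CreateTraceWriter(/*target_buffer=*/1);
//   {
//     auto packet = writer->NewTracePacket();
//     packet->set_timestamp(42);
//   }  // The returned handle finalizes the packet when it goes out of scope.
//   writer->Flush();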
|
|
|
|
// Creates a TraceWriter that will commit to the target buffer with the given
|
|
// reservation ID (creating a new reservation for this ID if none exists yet).
|
|
// The buffer reservation should be bound to an actual BufferID via
|
|
// BindStartupTargetBuffer() once the actual BufferID is known. Only supported
|
|
// if the arbiter was created using CreateUnbound(), and may be called while
|
|
// the arbiter is unbound.
|
|
//
|
|
// While any unbound buffer reservation exists, all commits will be buffered
|
|
// until all reservations have been bound. Until then,
|
|
// the data written to the SMB will not be consumed by the service - the SMB
|
|
// size should be chosen with this in mind. Startup writers always use
|
|
// BufferExhaustedPolicy::kDrop, as we cannot feasibly stall while not
|
|
// flushing to the service.
|
|
//
|
|
// The |target_buffer_reservation_id| should be greater than 0 but can
|
|
// otherwise be freely chosen by the producer and is only used to translate
|
|
// packets into the actual buffer id once
|
|
// BindStartupTargetBuffer(reservation_id) is called. For example, Chrome uses
|
|
// startup tracing not only for the first, but also subsequent tracing
|
|
// sessions (to enable tracing in the browser process before it instructs the
|
|
// tracing service to start tracing asynchronously, minimizing trace data loss
|
|
// in the meantime), and increments the reservation ID between sessions.
|
|
// Similarly, if more than a single target buffer per session is required
|
|
// (e.g. for two different data sources), different reservation IDs should be
|
|
// chosen for different target buffers.
|
|
virtual std::unique_ptr<TraceWriter> CreateStartupTraceWriter(
|
|
uint16_t target_buffer_reservation_id) = 0;
|
|
|
|
// Should only be called on unbound SharedMemoryArbiters. Binds the arbiter to
|
|
// the provided ProducerEndpoint and TaskRunner. Should be called only once
|
|
// and on the provided |TaskRunner|. Usually called by the producer (i.e., no
|
|
// specific data source) once it connects to the service. Both the endpoint
|
|
// and task runner should remain valid for the remainder of the arbiter's
|
|
// lifetime.
|
|
virtual void BindToProducerEndpoint(TracingService::ProducerEndpoint*,
|
|
base::TaskRunner*) = 0;
|
|
|
|
// Binds commits from TraceWriters created via CreateStartupTraceWriter() with
|
|
// the given |target_buffer_reservation_id| to |target_buffer_id|. May only be
|
|
// called once per |target_buffer_reservation_id|. Should be called on the
|
|
// arbiter's TaskRunner, and after BindToProducerEndpoint() was called.
|
|
// Usually, it is called by a specific data source, after it received its
|
|
// configuration (including the target buffer ID) from the service.
|
|
virtual void BindStartupTargetBuffer(uint16_t target_buffer_reservation_id,
|
|
BufferID target_buffer_id) = 0;
|
|
|
|
// Treat the reservation as resolved to an invalid buffer. Commits for this
|
|
// reservation will be flushed to the service ASAP. The service will free
|
|
// committed chunks but otherwise ignore them. The producer can call this
|
|
// method, for example, if connection to the tracing service failed or the
|
|
// session was stopped concurrently before the connection was established.
|
|
virtual void AbortStartupTracingForReservation(
|
|
uint16_t target_buffer_reservation_id) = 0;
|
|
|
|
// Notifies the service that all data for the given FlushRequestID has been
|
|
// committed in the shared memory buffer. Should only be called while bound.
|
|
virtual void NotifyFlushComplete(FlushRequestID) = 0;
|
|
|
|
// Sets the duration during which commits are batched. Args:
|
|
// |batch_commits_duration_ms|: The length of the period, during which commits
|
|
// by all trace writers are accumulated, before being sent to the service.
|
|
// When the period ends, all accumulated commits are flushed. On the first
|
|
// commit after the last flush, another delayed flush is scheduled to run in
|
|
// |batch_commits_duration_ms|. If an immediate flush occurs (via
|
|
// FlushPendingCommitDataRequests()) during a batching period, any
|
|
// accumulated commits up to that point will be sent to the service
|
|
// immediately. And when the batching period ends, the commits that occurred
|
|
// after the immediate flush will also be sent to the service.
|
|
//
|
|
// If the duration has already been set to a non-zero value before this method
|
|
// is called, and there is already a scheduled flush with the previously-set
|
|
// duration, the new duration will take effect after the scheduled flush
|
|
// occurs.
|
|
//
|
|
// If |batch_commits_duration_ms| is non-zero, batched data that hasn't been
|
|
// sent could be lost at the end of a tracing session. To avoid this,
|
|
// producers should make sure that FlushPendingCommitDataRequests is called
|
|
// after the last TraceWriter write and before the service has stopped
|
|
// listening for commits from the tracing session's data sources (i.e.
|
|
// data sources should stop asynchronously, see
|
|
// DataSourceDescriptor.will_notify_on_stop=true).
|
|
virtual void SetBatchCommitsDuration(uint32_t batch_commits_duration_ms) = 0;
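// Illustrative sketch: batch commits over 100 ms windows and force a final
// flush before the data source stops, as recommended above (|arbiter| is a
// hypothetical bound SharedMemoryArbiter):
//
//   arbiter->SetBatchCommitsDuration(100);
//   // ... TraceWriters keep writing; their commits are batched ...
//   arbiter->FlushPendingCommitDataRequests(
//       [] { /* all pending commits have reached the service */ });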
|
|
|
|
// Forces an immediate commit of the completed packets, without waiting for
|
|
// the next task or for a batching period to end. Should only be called while
|
|
// bound.
|
|
virtual void FlushPendingCommitDataRequests(
|
|
std::function<void()> callback = {}) = 0;
|
|
|
|
// Create a bound arbiter instance. Args:
|
|
// |SharedMemory|: the shared memory buffer to use.
|
|
// |page_size|: a multiple of 4KB that defines the granularity of tracing
|
|
// pages. See tradeoff considerations in shared_memory_abi.h.
|
|
// |ProducerEndpoint|: The service's producer endpoint used e.g. to commit
|
|
// chunks and register trace writers.
|
|
// |TaskRunner|: Task runner for perfetto's main thread, which executes the
|
|
// OnPagesCompleteCallback and IPC calls to the |ProducerEndpoint|.
|
|
//
|
|
// Implemented in src/core/shared_memory_arbiter_impl.cc.
|
|
static std::unique_ptr<SharedMemoryArbiter> CreateInstance(
|
|
SharedMemory*,
|
|
size_t page_size,
|
|
TracingService::ProducerEndpoint*,
|
|
base::TaskRunner*);
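// Illustrative sketch (|shm|, |endpoint| and |task_runner| are hypothetical
// objects provided by the producer's transport layer):
//
//   std::unique_ptr<SharedMemoryArbiter> arbiter =
//       SharedMemoryArbiter::CreateInstance(shm, /*page_size=*/4096,
//                                           endpoint, task_runner);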
|
|
|
|
// Create an unbound arbiter instance, which should later be bound to a
|
|
// ProducerEndpoint and TaskRunner by calling BindToProducerEndpoint(). The
|
|
// returned arbiter will ONLY support trace writers with
|
|
// BufferExhaustedPolicy::kDrop.
|
|
//
|
|
// An unbound SharedMemoryArbiter can be used to write to a producer-created
|
|
// SharedMemory buffer before the producer connects to the tracing service.
|
|
// The producer can then pass this SMB to the service when it connects (see
|
|
// TracingService::ConnectProducer).
|
|
//
|
|
// To trace into the SMB before the service starts the tracing session, trace
|
|
// writers can be obtained via CreateStartupTraceWriter() and later associated
|
|
// with a target buffer via BindStartupTargetBuffer(), once the target buffer
|
|
// is known.
|
|
//
|
|
// Implemented in src/core/shared_memory_arbiter_impl.cc. See CreateInstance()
|
|
// for comments about the arguments.
|
|
static std::unique_ptr<SharedMemoryArbiter> CreateUnboundInstance(
|
|
SharedMemory*,
|
|
size_t page_size);
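// Illustrative sketch of the startup-tracing flow described above (|shm|,
// |endpoint|, |task_runner| and |actual_buffer_id| are hypothetical):
//
//   auto arbiter =
//       SharedMemoryArbiter::CreateUnboundInstance(shm, /*page_size=*/4096);
//   auto writer = arbiter->CreateStartupTraceWriter(/*reservation_id=*/1);
//   // ... write packets; their commits stay buffered in the SMB ...
//   arbiter->BindToProducerEndpoint(endpoint, task_runner);
//   arbiter->BindStartupTargetBuffer(/*reservation_id=*/1, actual_buffer_id);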
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ARBITER_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_SHARED_MEMORY_ARBITER_IMPL_H_
|
|
#define SRC_TRACING_CORE_SHARED_MEMORY_ARBITER_IMPL_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <functional>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <mutex>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/id_allocator.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class PatchList;
|
|
class TraceWriter;
|
|
class TraceWriterImpl;
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
// This class handles the shared memory buffer on the producer side. It is used
|
|
// to obtain thread-local chunks and to partition pages from several threads.
|
|
// There is one arbiter instance per Producer.
|
|
// This class is thread-safe and uses locks to do so. Data sources are supposed
|
|
// to interact with this sporadically, only when they run out of space on their
|
|
// current thread-local chunk.
|
|
//
|
|
// When the arbiter is created using CreateUnboundInstance(), the following
|
|
// state transitions are possible:
|
|
//
|
|
// [ !fully_bound_, !endpoint_, 0 unbound buffer reservations ]
|
|
// | |
|
|
// | | CreateStartupTraceWriter(buf)
|
|
// | | buffer reservations += buf
|
|
// | |
|
|
// | | ----
|
|
// | | | | CreateStartupTraceWriter(buf)
|
|
// | | | | buffer reservations += buf
|
|
// | V | V
|
|
// | [ !fully_bound_, !endpoint_, >=1 unbound buffer reservations ]
|
|
// | |
|
|
// | BindToProducerEndpoint() |
|
|
// | |
|
|
// | BindToProducerEndpoint() |
|
|
// | V
|
|
// | [ !fully_bound_, endpoint_, >=1 unbound buffer reservations ]
|
|
// | A | A | A
|
|
// | | | | | |
|
|
// | | ---- | |
|
|
// | | CreateStartupTraceWriter(buf) | |
|
|
// | | buffer reservations += buf | |
|
|
// | | | |
|
|
// | | CreateStartupTraceWriter(buf) | |
|
|
// | | where buf is not yet bound | |
|
|
// | | buffer reservations += buf | | (yes)
|
|
// | | | |
|
|
// | | BindStartupTargetBuffer(buf, id) |-----
|
|
// | | buffer reservations -= buf | reservations > 0?
|
|
// | | |
|
|
// | | | (no)
|
|
// V | V
|
|
// [ fully_bound_, endpoint_, 0 unbound buffer reservations ]
|
|
// | A
|
|
// | | CreateStartupTraceWriter(buf)
|
|
// | | where buf is already bound
|
|
// ----
|
|
class SharedMemoryArbiterImpl : public SharedMemoryArbiter {
|
|
public:
|
|
// See SharedMemoryArbiter::CreateInstance(). |start|, |size| define the
|
|
// boundaries of the shared memory buffer. ProducerEndpoint and TaskRunner may
|
|
// be |nullptr| if created unbound, see
|
|
// SharedMemoryArbiter::CreateUnboundInstance().
|
|
SharedMemoryArbiterImpl(void* start,
|
|
size_t size,
|
|
size_t page_size,
|
|
TracingService::ProducerEndpoint*,
|
|
base::TaskRunner*);
|
|
|
|
// Returns a new Chunk to write tracing data. Depending on the provided
|
|
// BufferExhaustedPolicy, this may return an invalid chunk if no valid free
|
|
// chunk could be found in the SMB.
|
|
SharedMemoryABI::Chunk GetNewChunk(const SharedMemoryABI::ChunkHeader&,
|
|
BufferExhaustedPolicy,
|
|
size_t size_hint = 0);
|
|
|
|
// Puts back a Chunk that has been completed and sends a request to the
|
|
// service to move it to the central tracing buffer. |target_buffer| is the
|
|
// absolute trace buffer ID where the service should move the chunk onto (the
|
|
// producer just needs to copy back the same number received in the
|
|
// DataSourceConfig upon the StartDataSource() request).
|
|
// PatchList is a pointer to the list of patches for previous chunks. The
|
|
// first patched entries will be removed from the patched list and sent over
|
|
// to the service in the same CommitData() IPC request.
|
|
void ReturnCompletedChunk(SharedMemoryABI::Chunk,
|
|
MaybeUnboundBufferID target_buffer,
|
|
PatchList*);
|
|
|
|
// Send a request to the service to apply completed patches from |patch_list|.
|
|
// |writer_id| is the ID of the TraceWriter that calls this method,
|
|
// |target_buffer| is the global trace buffer ID of its target buffer.
|
|
void SendPatches(WriterID writer_id,
|
|
MaybeUnboundBufferID target_buffer,
|
|
PatchList* patch_list);
|
|
|
|
SharedMemoryABI* shmem_abi_for_testing() { return &shmem_abi_; }
|
|
|
|
static void set_default_layout_for_testing(SharedMemoryABI::PageLayout l) {
|
|
default_page_layout = l;
|
|
}
|
|
|
|
// SharedMemoryArbiter implementation.
|
|
// See include/perfetto/tracing/core/shared_memory_arbiter.h for comments.
|
|
std::unique_ptr<TraceWriter> CreateTraceWriter(
|
|
BufferID target_buffer,
|
|
BufferExhaustedPolicy = BufferExhaustedPolicy::kDefault) override;
|
|
std::unique_ptr<TraceWriter> CreateStartupTraceWriter(
|
|
uint16_t target_buffer_reservation_id) override;
|
|
void BindToProducerEndpoint(TracingService::ProducerEndpoint*,
|
|
base::TaskRunner*) override;
|
|
void BindStartupTargetBuffer(uint16_t target_buffer_reservation_id,
|
|
BufferID target_buffer_id) override;
|
|
void AbortStartupTracingForReservation(
|
|
uint16_t target_buffer_reservation_id) override;
|
|
void NotifyFlushComplete(FlushRequestID) override;
|
|
|
|
void SetBatchCommitsDuration(uint32_t batch_commits_duration_ms) override;
|
|
|
|
void FlushPendingCommitDataRequests(
|
|
std::function<void()> callback = {}) override;
|
|
|
|
base::TaskRunner* task_runner() const { return task_runner_; }
|
|
size_t page_size() const { return shmem_abi_.page_size(); }
|
|
size_t num_pages() const { return shmem_abi_.num_pages(); }
|
|
|
|
base::WeakPtr<SharedMemoryArbiterImpl> GetWeakPtr() const {
|
|
return weak_ptr_factory_.GetWeakPtr();
|
|
}
|
|
|
|
private:
|
|
friend class TraceWriterImpl;
|
|
friend class StartupTraceWriterTest;
|
|
friend class SharedMemoryArbiterImplTest;
|
|
|
|
struct TargetBufferReservation {
|
|
bool resolved = false;
|
|
BufferID target_buffer = kInvalidBufferId;
|
|
};
|
|
|
|
// Placeholder for the actual target buffer ID of a startup target buffer
|
|
// reservation ID in |target_buffer_reservations_|.
|
|
static constexpr BufferID kInvalidBufferId = 0;
|
|
|
|
static SharedMemoryABI::PageLayout default_page_layout;
|
|
|
|
SharedMemoryArbiterImpl(const SharedMemoryArbiterImpl&) = delete;
|
|
SharedMemoryArbiterImpl& operator=(const SharedMemoryArbiterImpl&) = delete;
|
|
|
|
void UpdateCommitDataRequest(SharedMemoryABI::Chunk chunk,
|
|
WriterID writer_id,
|
|
MaybeUnboundBufferID target_buffer,
|
|
PatchList* patch_list);
|
|
|
|
std::unique_ptr<TraceWriter> CreateTraceWriterInternal(
|
|
MaybeUnboundBufferID target_buffer,
|
|
BufferExhaustedPolicy);
|
|
|
|
// Called by the TraceWriter destructor.
|
|
void ReleaseWriterID(WriterID);
|
|
|
|
void BindStartupTargetBufferImpl(std::unique_lock<std::mutex> scoped_lock,
|
|
uint16_t target_buffer_reservation_id,
|
|
BufferID target_buffer_id);
|
|
|
|
// If any flush callbacks were queued up while the arbiter or any target
|
|
// buffer reservation was unbound, this wraps the pending callbacks into a new
|
|
// std::function and returns it. Otherwise returns an invalid std::function.
|
|
std::function<void()> TakePendingFlushCallbacksLocked();
|
|
|
|
// Replace occurrences of target buffer reservation IDs in |commit_data_req_|
|
|
// with their respective actual BufferIDs if they were already bound. Returns
|
|
// true iff all occurrences were replaced.
|
|
bool ReplaceCommitPlaceholderBufferIdsLocked();
|
|
|
|
// Update and return |fully_bound_| based on the arbiter's |pending_writers_|
|
|
// state.
|
|
bool UpdateFullyBoundLocked();
|
|
|
|
const bool initially_bound_;
|
|
|
|
// Only accessed on |task_runner_| after the producer endpoint was bound.
|
|
TracingService::ProducerEndpoint* producer_endpoint_ = nullptr;
|
|
|
|
// --- Begin lock-protected members ---
|
|
|
|
std::mutex lock_;
|
|
|
|
base::TaskRunner* task_runner_ = nullptr;
|
|
SharedMemoryABI shmem_abi_;
|
|
size_t page_idx_ = 0;
|
|
std::unique_ptr<CommitDataRequest> commit_data_req_;
|
|
size_t bytes_pending_commit_ = 0; // SUM(chunk.size() : commit_data_req_).
|
|
IdAllocator<WriterID> active_writer_ids_;
|
|
|
|
// Whether the arbiter itself and all startup target buffer reservations are
|
|
// bound. Note that this can become false again later if a new target buffer
|
|
// reservation is created by calling CreateStartupTraceWriter() with a new
|
|
// reservation id.
|
|
bool fully_bound_;
|
|
|
|
// IDs of writers and their assigned target buffers that should be registered
|
|
// with the service after the arbiter and/or their startup target buffer is
|
|
// bound.
|
|
std::map<WriterID, MaybeUnboundBufferID> pending_writers_;
|
|
|
|
// Callbacks for flush requests issued while the arbiter or a target buffer
|
|
// reservation was unbound.
|
|
std::vector<std::function<void()>> pending_flush_callbacks_;
|
|
|
|
// See SharedMemoryArbiter.SetBatchCommitsDuration.
|
|
uint32_t batch_commits_duration_ms_ = 0;
|
|
|
|
// Indicates whether we have already scheduled a delayed flush for the
|
|
// purposes of batching. Set to true at the beginning of a batching period and
|
|
// cleared at the end of the period. Immediate flushes that happen during a
|
|
// batching period will empty the |commit_data_req| (triggering an immediate
|
|
// IPC to the service), but will not clear this flag and the
|
|
// previously-scheduled delayed flush will still occur at the end of the
|
|
// batching period.
|
|
bool delayed_flush_scheduled_ = false;
|
|
|
|
// Stores target buffer reservations for writers created via
|
|
// CreateStartupTraceWriter(). A bound reservation sets
|
|
// TargetBufferReservation::resolved to true and is associated with the actual
|
|
// BufferID supplied in BindStartupTargetBuffer().
|
|
//
|
|
// TODO(eseckler): Clean up entries from this map. This would probably require
|
|
// a method in SharedMemoryArbiter that allows a producer to invalidate a
|
|
// reservation ID.
|
|
std::map<MaybeUnboundBufferID, TargetBufferReservation>
|
|
target_buffer_reservations_;
|
|
|
|
// --- End lock-protected members ---
|
|
|
|
// Keep at the end.
|
|
base::WeakPtrFactory<SharedMemoryArbiterImpl> weak_ptr_factory_;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_SHARED_MEMORY_ARBITER_IMPL_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/commit_data_request.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_COMMIT_DATA_REQUEST_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_COMMIT_DATA_REQUEST_H_
|
|
|
|
// Creates the aliases in the ::perfetto namespace, doing things like:
|
|
// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
|
|
// See comments in forward_decls.h for the historical reasons of this
|
|
// indirection layer.
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_COMMIT_DATA_REQUEST_H_
|
|
// gen_amalgamated begin header: src/tracing/core/trace_writer_impl.h
|
|
// gen_amalgamated begin header: src/tracing/core/patch_list.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_PATCH_LIST_H_
|
|
#define SRC_TRACING_CORE_PATCH_LIST_H_
|
|
|
|
#include <array>
|
|
#include <forward_list>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Used to handle the backfilling of the headers (the |size_field|) of nested
|
|
// messages when a proto is fragmented over several chunks. These patches are
|
|
// sent out-of-band to the tracing service, after having returned the initial
|
|
// chunks of the fragment.
|
|
// TODO(crbug.com/904477): Re-disable the move constructors when all uses of
|
|
// this class have been fixed.
|
|
class Patch {
|
|
public:
|
|
using PatchContent = std::array<uint8_t, SharedMemoryABI::kPacketHeaderSize>;
|
|
Patch(ChunkID c, uint16_t o) : chunk_id(c), offset(o) {}
|
|
Patch(const Patch&) = default; // For tests.
|
|
|
|
const ChunkID chunk_id;
|
|
const uint16_t offset;
|
|
PatchContent size_field{};
|
|
|
|
// |size_field| contains a varint. Any varint must start with a byte != 0. Even in
|
|
// the case we want to encode a size == 0, protozero will write a redundant
|
|
// varint in that case, i.e. [0x80, 0x80, 0x80, 0x00]. So the first byte is 0
|
|
// iff we never wrote any varint into that.
|
|
bool is_patched() const { return size_field[0] != 0; }
|
|
|
|
// For tests.
|
|
bool operator==(const Patch& o) const {
|
|
return chunk_id == o.chunk_id && offset == o.offset &&
|
|
size_field == o.size_field;
|
|
}
|
|
|
|
private:
|
|
Patch& operator=(const Patch&) = delete;
|
|
};
|
|
|
|
// Note: the protozero::Message(s) will take pointers to the |size_field| of
|
|
// these entries. This container must guarantee that the Patch objects are never
|
|
// moved around (i.e. it cannot be a vector because reallocations can change
|
|
// addresses of pre-existing entries).
|
|
class PatchList {
|
|
public:
|
|
using ListType = std::forward_list<Patch>;
|
|
using value_type = ListType::value_type; // For gtest.
|
|
using const_iterator = ListType::const_iterator; // For gtest.
|
|
|
|
PatchList() : last_(list_.before_begin()) {}
|
|
|
|
Patch* emplace_back(ChunkID chunk_id, uint16_t offset) {
|
|
PERFETTO_DCHECK(empty() || last_->chunk_id != chunk_id ||
|
|
offset >= last_->offset + sizeof(Patch::PatchContent));
|
|
last_ = list_.emplace_after(last_, chunk_id, offset);
|
|
return &*last_;
|
|
}
|
|
|
|
void pop_front() {
|
|
PERFETTO_DCHECK(!list_.empty());
|
|
list_.pop_front();
|
|
if (empty())
|
|
last_ = list_.before_begin();
|
|
}
|
|
|
|
const Patch& front() const {
|
|
PERFETTO_DCHECK(!list_.empty());
|
|
return list_.front();
|
|
}
|
|
|
|
const Patch& back() const {
|
|
PERFETTO_DCHECK(!list_.empty());
|
|
return *last_;
|
|
}
|
|
|
|
ListType::const_iterator begin() const { return list_.begin(); }
|
|
ListType::const_iterator end() const { return list_.end(); }
|
|
bool empty() const { return list_.empty(); }
|
|
|
|
private:
|
|
ListType list_;
|
|
ListType::iterator last_;
|
|
};
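// Illustrative sketch of how a writer records a pending patch and later
// checks whether protozero has filled in its |size_field| (values are
// arbitrary):
//
//   PatchList patches;
//   Patch* p = patches.emplace_back(/*chunk_id=*/7, /*offset=*/16);
//   PERFETTO_DCHECK(!p->is_patched());  // |size_field| is still all-zero.
//   p->size_field[0] = 0x80;            // Normally written via protozero.
//   PERFETTO_DCHECK(patches.front().is_patched());
//   patches.pop_front();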
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_PATCH_LIST_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_TRACE_WRITER_IMPL_H_
|
|
#define SRC_TRACING_CORE_TRACE_WRITER_IMPL_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/proc_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/patch_list.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class SharedMemoryArbiterImpl;
|
|
|
|
// See //include/perfetto/tracing/core/trace_writer.h for docs.
|
|
class TraceWriterImpl : public TraceWriter,
|
|
public protozero::ScatteredStreamWriter::Delegate {
|
|
public:
|
|
// TracePacketHandle is defined in trace_writer.h
|
|
TraceWriterImpl(SharedMemoryArbiterImpl*,
|
|
WriterID,
|
|
MaybeUnboundBufferID buffer_id,
|
|
BufferExhaustedPolicy);
|
|
~TraceWriterImpl() override;
|
|
|
|
// TraceWriter implementation. See documentation in trace_writer.h.
|
|
TracePacketHandle NewTracePacket() override;
|
|
void Flush(std::function<void()> callback = {}) override;
|
|
WriterID writer_id() const override;
|
|
uint64_t written() const override {
|
|
return protobuf_stream_writer_.written();
|
|
}
|
|
|
|
void ResetChunkForTesting() { cur_chunk_ = SharedMemoryABI::Chunk(); }
|
|
bool drop_packets_for_testing() const { return drop_packets_; }
|
|
|
|
private:
|
|
TraceWriterImpl(const TraceWriterImpl&) = delete;
|
|
TraceWriterImpl& operator=(const TraceWriterImpl&) = delete;
|
|
|
|
// ScatteredStreamWriter::Delegate implementation.
|
|
protozero::ContiguousMemoryRange GetNewBuffer() override;
|
|
|
|
// The per-producer arbiter that coordinates access to the shared memory
|
|
// buffer from several threads.
|
|
SharedMemoryArbiterImpl* const shmem_arbiter_;
|
|
|
|
// ID of the current writer.
|
|
const WriterID id_;
|
|
|
|
// This is copied into the commit request by SharedMemoryArbiter. See comments
|
|
// in data_source_config.proto for |target_buffer|. If this is a reservation
|
|
// for a buffer ID in case of a startup trace writer, SharedMemoryArbiterImpl
|
|
// will also translate the reservation ID to the actual buffer ID.
|
|
const MaybeUnboundBufferID target_buffer_;
|
|
|
|
// Whether GetNewChunk() should stall or return an invalid chunk if the SMB is
|
|
// exhausted.
|
|
const BufferExhaustedPolicy buffer_exhausted_policy_;
|
|
|
|
// Monotonic (% wrapping) sequence id of the chunk. Together with the WriterID
|
|
// this allows the Service to reconstruct the linear sequence of packets.
|
|
ChunkID next_chunk_id_ = 0;
|
|
|
|
// The chunk we are holding onto (if any).
|
|
SharedMemoryABI::Chunk cur_chunk_;
|
|
|
|
// Passed to protozero message to write directly into |cur_chunk_|. It
|
|
// keeps track of the write pointer. It calls us back (GetNewBuffer()) when
|
|
// |cur_chunk_| is filled.
|
|
protozero::ScatteredStreamWriter protobuf_stream_writer_;
|
|
|
|
// The packet returned via NewTracePacket(). It is owned by this class,
|
|
// TracePacketHandle has just a pointer to it.
|
|
std::unique_ptr<protozero::RootMessage<protos::pbzero::TracePacket>>
|
|
cur_packet_;
|
|
|
|
// The start address of |cur_packet_| within |cur_chunk_|. Used to figure out
|
|
// fragments sizes when a TracePacket write is interrupted by GetNewBuffer().
|
|
uint8_t* cur_fragment_start_ = nullptr;
|
|
|
|
// true if we received a call to GetNewBuffer() after NewTracePacket(),
|
|
// false if GetNewBuffer() happened during NewTracePacket() prologue, while
|
|
// starting the TracePacket header.
|
|
bool fragmenting_packet_ = false;
|
|
|
|
// Set to |true| when the current chunk contains the maximum number of packets
|
|
// a chunk can contain. When this is |true|, the next packet requires starting
|
|
// a new chunk.
|
|
bool reached_max_packets_per_chunk_ = false;
|
|
|
|
// If we fail to acquire a new chunk when the arbiter operates in
|
|
// BufferExhaustedPolicy::kDrop mode, the trace writer enters a
|
|
// mode in which data is written to a local garbage chunk and dropped.
|
|
bool drop_packets_ = false;
|
|
|
|
// Whether the trace writer should try to acquire a new chunk from the SMB
|
|
// when the next TracePacket is started because it filled the garbage chunk at
|
|
// least once since the last attempt.
|
|
bool retry_new_chunk_after_packet_ = false;
|
|
|
|
// Points to the size field of the last packet we wrote to the current chunk.
|
|
// If the chunk was already returned, this is reset to |nullptr|.
|
|
uint8_t* last_packet_size_field_ = nullptr;
|
|
|
|
// When a packet is fragmented across different chunks, the |size_field| of
|
|
// the outstanding nested protobuf messages is redirected onto Patch entries
|
|
// in this list at the time the Chunk is returned (because at that point we
|
|
// have to release the ownership of the current Chunk). This list will be
|
|
// later sent out-of-band to the tracing service, who will patch the required
|
|
// chunks, if they are still around.
|
|
PatchList patch_list_;
|
|
|
|
// PID of the process that created the trace writer. Used for a DCHECK that
|
|
// aims to detect unsupported process forks while tracing.
|
|
const base::PlatformProcessId process_id_;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_TRACE_WRITER_IMPL_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/shared_memory_arbiter_impl.h"
|
|
|
|
#include <algorithm>
|
|
#include <limits>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/null_trace_writer.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/trace_writer_impl.h"
|
|
|
|
namespace perfetto {
|
|
|
|
using Chunk = SharedMemoryABI::Chunk;
|
|
|
|
namespace {
|
|
static_assert(sizeof(BufferID) == sizeof(uint16_t),
|
|
"The MaybeUnboundBufferID logic requires BufferID not to grow "
|
|
"above uint16_t.");
|
|
|
|
MaybeUnboundBufferID MakeTargetBufferIdForReservation(uint16_t reservation_id) {
|
|
// Reservation IDs are stored in the upper bits.
|
|
PERFETTO_CHECK(reservation_id > 0);
|
|
return static_cast<MaybeUnboundBufferID>(reservation_id) << 16;
|
|
}
|
|
|
|
bool IsReservationTargetBufferId(MaybeUnboundBufferID buffer_id) {
|
|
return (buffer_id >> 16) > 0;
|
|
}
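// For example, MakeTargetBufferIdForReservation(1) yields 0x10000: the
// reservation ID lives in the upper 16 bits, so IsReservationTargetBufferId()
// returns true for it, while real BufferIDs fit entirely in the lower 16 bits.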
|
|
} // namespace
|
|
|
|
// static
|
|
SharedMemoryABI::PageLayout SharedMemoryArbiterImpl::default_page_layout =
|
|
SharedMemoryABI::PageLayout::kPageDiv1;
|
|
|
|
// static
|
|
constexpr BufferID SharedMemoryArbiterImpl::kInvalidBufferId;
|
|
|
|
// static
|
|
std::unique_ptr<SharedMemoryArbiter> SharedMemoryArbiter::CreateInstance(
|
|
SharedMemory* shared_memory,
|
|
size_t page_size,
|
|
TracingService::ProducerEndpoint* producer_endpoint,
|
|
base::TaskRunner* task_runner) {
|
|
return std::unique_ptr<SharedMemoryArbiterImpl>(
|
|
new SharedMemoryArbiterImpl(shared_memory->start(), shared_memory->size(),
|
|
page_size, producer_endpoint, task_runner));
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<SharedMemoryArbiter> SharedMemoryArbiter::CreateUnboundInstance(
|
|
SharedMemory* shared_memory,
|
|
size_t page_size) {
|
|
return std::unique_ptr<SharedMemoryArbiterImpl>(new SharedMemoryArbiterImpl(
|
|
shared_memory->start(), shared_memory->size(), page_size,
|
|
/*producer_endpoint=*/nullptr, /*task_runner=*/nullptr));
|
|
}
|
|
|
|
SharedMemoryArbiterImpl::SharedMemoryArbiterImpl(
|
|
void* start,
|
|
size_t size,
|
|
size_t page_size,
|
|
TracingService::ProducerEndpoint* producer_endpoint,
|
|
base::TaskRunner* task_runner)
|
|
: initially_bound_(task_runner && producer_endpoint),
|
|
producer_endpoint_(producer_endpoint),
|
|
task_runner_(task_runner),
|
|
shmem_abi_(reinterpret_cast<uint8_t*>(start), size, page_size),
|
|
active_writer_ids_(kMaxWriterID),
|
|
fully_bound_(initially_bound_),
|
|
weak_ptr_factory_(this) {}
|
|
|
|
Chunk SharedMemoryArbiterImpl::GetNewChunk(
|
|
const SharedMemoryABI::ChunkHeader& header,
|
|
BufferExhaustedPolicy buffer_exhausted_policy,
|
|
size_t size_hint) {
|
|
PERFETTO_DCHECK(size_hint == 0); // Not implemented yet.
|
|
// If initially unbound, we do not support stalling. In theory, we could
|
|
// support stalling for TraceWriters created after the arbiter and startup
|
|
// buffer reservations were bound, but to avoid raciness between the creation
|
|
// of startup writers and binding, we categorically forbid kStall mode.
|
|
PERFETTO_DCHECK(initially_bound_ ||
|
|
buffer_exhausted_policy == BufferExhaustedPolicy::kDrop);
|
|
|
|
int stall_count = 0;
|
|
unsigned stall_interval_us = 0;
|
|
bool task_runner_runs_on_current_thread = false;
|
|
static const unsigned kMaxStallIntervalUs = 100000;
|
|
static const int kLogAfterNStalls = 3;
|
|
static const int kFlushCommitsAfterEveryNStalls = 2;
|
|
static const int kAssertAtNStalls = 100;
|
|
|
|
for (;;) {
|
|
// TODO(primiano): Probably this lock is not really required and this code
|
|
// could be rewritten leveraging only the Try* atomic operations in
|
|
// SharedMemoryABI. But let's not be too adventurous for the moment.
|
|
{
|
|
std::unique_lock<std::mutex> scoped_lock(lock_);
|
|
|
|
task_runner_runs_on_current_thread =
|
|
task_runner_ && task_runner_->RunsTasksOnCurrentThread();
|
|
|
|
// If more than half of the SMB.size() is filled with completed chunks for
|
|
// which we haven't notified the service yet (i.e. they are still enqueued
|
|
// in |commit_data_req_|), force a synchronous CommitDataRequest() even if
|
|
// we acquire a chunk, to reduce the likelihood of stalling the writer.
|
|
//
|
|
// We can only do this if we're writing on the same thread that we access
|
|
// the producer endpoint on, since we cannot notify the producer endpoint
|
|
// to commit synchronously on a different thread. Attempting to flush
|
|
// synchronously on another thread will lead to subtle bugs caused by
|
|
// out-of-order commit requests (crbug.com/919187#c28).
|
|
bool should_commit_synchronously =
|
|
task_runner_runs_on_current_thread &&
|
|
buffer_exhausted_policy == BufferExhaustedPolicy::kStall &&
|
|
commit_data_req_ && bytes_pending_commit_ >= shmem_abi_.size() / 2;
|
|
|
|
const size_t initial_page_idx = page_idx_;
|
|
for (size_t i = 0; i < shmem_abi_.num_pages(); i++) {
|
|
page_idx_ = (initial_page_idx + i) % shmem_abi_.num_pages();
|
|
bool is_new_page = false;
|
|
|
|
// TODO(primiano): make the page layout dynamic.
|
|
auto layout = SharedMemoryArbiterImpl::default_page_layout;
|
|
|
|
if (shmem_abi_.is_page_free(page_idx_)) {
|
|
// TODO(primiano): Use the |size_hint| here to decide the layout.
|
|
is_new_page = shmem_abi_.TryPartitionPage(page_idx_, layout);
|
|
}
|
|
uint32_t free_chunks;
|
|
if (is_new_page) {
|
|
free_chunks = (1 << SharedMemoryABI::kNumChunksForLayout[layout]) - 1;
|
|
} else {
|
|
free_chunks = shmem_abi_.GetFreeChunks(page_idx_);
|
|
}
|
|
|
|
for (uint32_t chunk_idx = 0; free_chunks;
|
|
chunk_idx++, free_chunks >>= 1) {
|
|
if (!(free_chunks & 1))
|
|
continue;
|
|
// We found a free chunk.
|
|
Chunk chunk = shmem_abi_.TryAcquireChunkForWriting(
|
|
page_idx_, chunk_idx, &header);
|
|
if (!chunk.is_valid())
|
|
continue;
|
|
if (stall_count > kLogAfterNStalls) {
|
|
PERFETTO_LOG("Recovered from stall after %d iterations",
|
|
stall_count);
|
|
}
|
|
|
|
if (should_commit_synchronously) {
|
|
// We can't flush while holding the lock.
|
|
scoped_lock.unlock();
|
|
FlushPendingCommitDataRequests();
|
|
return chunk;
|
|
} else {
|
|
return chunk;
|
|
}
|
|
}
|
|
}
|
|
} // scoped_lock
|
|
|
|
if (buffer_exhausted_policy == BufferExhaustedPolicy::kDrop) {
|
|
PERFETTO_DLOG("Shared memory buffer exhaused, returning invalid Chunk!");
|
|
return Chunk();
|
|
}
|
|
|
|
PERFETTO_DCHECK(initially_bound_);
|
|
|
|
// All chunks are taken (either kBeingWritten by us or kBeingRead by the
|
|
// Service).
|
|
if (stall_count++ == kLogAfterNStalls) {
|
|
PERFETTO_LOG("Shared memory buffer overrun! Stalling");
|
|
}
|
|
|
|
if (stall_count == kAssertAtNStalls) {
|
|
PERFETTO_FATAL(
|
|
"Shared memory buffer max stall count exceeded; possible deadlock");
|
|
}
|
|
|
|
// If the IPC thread itself is stalled because the current process has
|
|
// filled up the SMB, we need to make sure that the service can process and
|
|
// purge the chunks written by our process, by flushing any pending commit
|
|
// requests. Because other threads in our process can continue to
|
|
// concurrently grab, fill and commit any chunks purged by the service, it
|
|
// is possible that the SMB remains full and the IPC thread remains stalled,
|
|
// needing to flush the concurrently queued up commits again. This is
|
|
// particularly likely with in-process perfetto service where the IPC thread
|
|
// is the service thread. To avoid remaining stalled forever in such a
|
|
// situation, we attempt to flush periodically after every N stalls.
|
|
if (stall_count % kFlushCommitsAfterEveryNStalls == 0 &&
|
|
task_runner_runs_on_current_thread) {
|
|
// TODO(primiano): sending the IPC synchronously is a temporary workaround
|
|
// until the backpressure logic in probes_producer is sorted out. Until
|
|
// then the risk is that we stall the message loop waiting for the tracing
|
|
// service to consume the shared memory buffer (SMB) and, for this reason,
|
|
// never run the task that tells the service to purge the SMB. This must
|
|
// happen iff we are on the IPC thread, not doing this will cause
|
|
// deadlocks, doing this on the wrong thread causes out-of-order data
|
|
// commits (crbug.com/919187#c28).
|
|
FlushPendingCommitDataRequests();
|
|
} else {
|
|
base::SleepMicroseconds(stall_interval_us);
|
|
stall_interval_us =
|
|
std::min(kMaxStallIntervalUs, (stall_interval_us + 1) * 8);
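      // Exponential backoff: starting from 0, the sleep grows as 8, 72, 584,
      // 4680, 37448 us and is then capped at kMaxStallIntervalUs (100000 us).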
|
|
}
|
|
}
|
|
}
|
|
|
|
void SharedMemoryArbiterImpl::ReturnCompletedChunk(
|
|
Chunk chunk,
|
|
MaybeUnboundBufferID target_buffer,
|
|
PatchList* patch_list) {
|
|
PERFETTO_DCHECK(chunk.is_valid());
|
|
const WriterID writer_id = chunk.writer_id();
|
|
UpdateCommitDataRequest(std::move(chunk), writer_id, target_buffer,
|
|
patch_list);
|
|
}
|
|
|
|
void SharedMemoryArbiterImpl::SendPatches(WriterID writer_id,
|
|
MaybeUnboundBufferID target_buffer,
|
|
PatchList* patch_list) {
|
|
PERFETTO_DCHECK(!patch_list->empty() && patch_list->front().is_patched());
|
|
UpdateCommitDataRequest(Chunk(), writer_id, target_buffer, patch_list);
|
|
}
|
|
|
|
void SharedMemoryArbiterImpl::UpdateCommitDataRequest(
|
|
Chunk chunk,
|
|
WriterID writer_id,
|
|
MaybeUnboundBufferID target_buffer,
|
|
PatchList* patch_list) {
|
|
// Note: chunk will be invalid if the call came from SendPatches().
|
|
base::TaskRunner* task_runner_to_post_delayed_callback_on = nullptr;
|
|
// The delay with which the flush will be posted.
|
|
uint32_t flush_delay_ms = 0;
|
|
base::WeakPtr<SharedMemoryArbiterImpl> weak_this;
|
|
{
|
|
std::lock_guard<std::mutex> scoped_lock(lock_);
|
|
|
|
if (!commit_data_req_) {
|
|
commit_data_req_.reset(new CommitDataRequest());
|
|
|
|
// Flushing the commit is only supported while we're |fully_bound_|. If we
|
|
// aren't, we'll flush when |fully_bound_| is updated.
|
|
if (fully_bound_ && !delayed_flush_scheduled_) {
|
|
weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_to_post_delayed_callback_on = task_runner_;
|
|
flush_delay_ms = batch_commits_duration_ms_;
|
|
delayed_flush_scheduled_ = true;
|
|
}
|
|
}
|
|
|
|
// If a valid chunk is specified, return it and attach it to the request.
|
|
if (chunk.is_valid()) {
|
|
PERFETTO_DCHECK(chunk.writer_id() == writer_id);
|
|
uint8_t chunk_idx = chunk.chunk_idx();
|
|
bytes_pending_commit_ += chunk.size();
|
|
size_t page_idx = shmem_abi_.ReleaseChunkAsComplete(std::move(chunk));
|
|
|
|
// DO NOT access |chunk| after this point, has been std::move()-d above.
|
|
|
|
CommitDataRequest::ChunksToMove* ctm =
|
|
commit_data_req_->add_chunks_to_move();
|
|
ctm->set_page(static_cast<uint32_t>(page_idx));
|
|
ctm->set_chunk(chunk_idx);
|
|
ctm->set_target_buffer(target_buffer);
|
|
}
|
|
|
|
// Get the completed patches for previous chunks from the |patch_list|
|
|
// and attach them.
|
|
ChunkID last_chunk_id = 0; // 0 is irrelevant but keeps the compiler happy.
|
|
CommitDataRequest::ChunkToPatch* last_chunk_req = nullptr;
|
|
while (!patch_list->empty() && patch_list->front().is_patched()) {
|
|
if (!last_chunk_req || last_chunk_id != patch_list->front().chunk_id) {
|
|
last_chunk_req = commit_data_req_->add_chunks_to_patch();
|
|
last_chunk_req->set_writer_id(writer_id);
|
|
last_chunk_id = patch_list->front().chunk_id;
|
|
last_chunk_req->set_chunk_id(last_chunk_id);
|
|
last_chunk_req->set_target_buffer(target_buffer);
|
|
}
|
|
auto* patch_req = last_chunk_req->add_patches();
|
|
patch_req->set_offset(patch_list->front().offset);
|
|
patch_req->set_data(&patch_list->front().size_field[0],
|
|
patch_list->front().size_field.size());
|
|
patch_list->pop_front();
|
|
}
|
|
// Patches are enqueued in the |patch_list| in order and are notified to
|
|
// the service when the chunk is returned. The only case when the current
|
|
// patch list is incomplete is if there is an unpatched entry at the head of
|
|
// the |patch_list| that belongs to the same ChunkID as the last one we are
|
|
// about to send to the service.
|
|
if (last_chunk_req && !patch_list->empty() &&
|
|
patch_list->front().chunk_id == last_chunk_id) {
|
|
last_chunk_req->set_has_more_patches(true);
|
|
}
|
|
|
|
// If the buffer is filling up, we don't want to wait for the next delayed
|
|
// flush to happen. So post a flush for immediate execution.
|
|
if (fully_bound_ && bytes_pending_commit_ >= shmem_abi_.size() / 2) {
|
|
weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_to_post_delayed_callback_on = task_runner_;
|
|
flush_delay_ms = 0;
|
|
}
|
|
} // scoped_lock(lock_)
|
|
|
|
// We shouldn't post tasks while locked.
|
|
// |task_runner_to_post_delayed_callback_on| remains valid after unlocking,
|
|
// because |task_runner_| is never reset.
|
|
if (task_runner_to_post_delayed_callback_on) {
|
|
task_runner_to_post_delayed_callback_on->PostDelayedTask(
|
|
[weak_this] {
|
|
if (!weak_this)
|
|
return;
|
|
{
|
|
std::lock_guard<std::mutex> scoped_lock(weak_this.get()->lock_);
|
|
// Clear |delayed_flush_scheduled_|, allowing the next call to
|
|
// UpdateCommitDataRequest to start another batching period.
|
|
weak_this.get()->delayed_flush_scheduled_ = false;
|
|
}
|
|
weak_this->FlushPendingCommitDataRequests();
|
|
},
|
|
flush_delay_ms);
|
|
}
|
|
}
|
|
|
|
void SharedMemoryArbiterImpl::SetBatchCommitsDuration(
|
|
uint32_t batch_commits_duration_ms) {
|
|
std::lock_guard<std::mutex> scoped_lock(lock_);
|
|
batch_commits_duration_ms_ = batch_commits_duration_ms;
|
|
}
|
|
|
|
// This function is quite subtle. When making changes keep in mind these two
|
|
// challenges:
|
|
// 1) If the producer stalls and we happen to be on the |task_runner_| IPC
|
|
// thread (or, for in-process cases, on the same thread where
|
|
// TracingServiceImpl lives), the CommitData() call must be synchronous and
|
|
// not posted, to avoid deadlocks.
|
|
// 2) When different threads hit this function, we must guarantee that we don't
|
|
// accidentally make commits out of order. See commit 4e4fe8f56ef and
|
|
// crbug.com/919187 for more context.
|
|
void SharedMemoryArbiterImpl::FlushPendingCommitDataRequests(
|
|
std::function<void()> callback) {
|
|
std::unique_ptr<CommitDataRequest> req;
|
|
{
|
|
std::unique_lock<std::mutex> scoped_lock(lock_);
|
|
|
|
// Flushing is only supported while |fully_bound_|, and there may still be
|
|
// unbound startup trace writers. If so, skip the commit for now - it'll be
|
|
// done when |fully_bound_| is updated.
|
|
if (!fully_bound_) {
|
|
if (callback)
|
|
pending_flush_callbacks_.push_back(callback);
|
|
return;
|
|
}
|
|
|
|
// May be called by TraceWriterImpl on any thread.
|
|
base::TaskRunner* task_runner = task_runner_;
|
|
if (!task_runner->RunsTasksOnCurrentThread()) {
|
|
// We shouldn't post a task while holding a lock. |task_runner| remains
|
|
// valid after unlocking, because |task_runner_| is never reset.
|
|
scoped_lock.unlock();
|
|
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner->PostTask([weak_this, callback] {
|
|
if (weak_this)
|
|
weak_this->FlushPendingCommitDataRequests(std::move(callback));
|
|
});
|
|
return;
|
|
}
|
|
|
|
// |commit_data_req_| could have become a nullptr, for example when a forced
|
|
// sync flush happens in GetNewChunk().
|
|
if (commit_data_req_) {
|
|
// Make sure any placeholder buffer IDs from StartupWriters are replaced
|
|
// before sending the request.
|
|
bool all_placeholders_replaced =
|
|
ReplaceCommitPlaceholderBufferIdsLocked();
|
|
// We're |fully_bound_|, thus all writers are bound and all placeholders
|
|
// should have been replaced.
|
|
PERFETTO_DCHECK(all_placeholders_replaced);
|
|
|
|
req = std::move(commit_data_req_);
|
|
bytes_pending_commit_ = 0;
|
|
}
|
|
} // scoped_lock
|
|
|
|
if (req) {
|
|
producer_endpoint_->CommitData(*req, callback);
|
|
} else if (callback) {
|
|
// If |req| was nullptr, it means that an enqueued deferred commit was
|
|
// executed just before this. At this point send an empty commit request
|
|
// to the service, just to linearize with it and give the guarantee to the
|
|
// caller that the data has been flushed into the service.
|
|
producer_endpoint_->CommitData(CommitDataRequest(), std::move(callback));
|
|
}
|
|
}
|
|
|
|
std::unique_ptr<TraceWriter> SharedMemoryArbiterImpl::CreateTraceWriter(
|
|
BufferID target_buffer,
|
|
BufferExhaustedPolicy buffer_exhausted_policy) {
|
|
PERFETTO_CHECK(target_buffer > 0);
|
|
return CreateTraceWriterInternal(target_buffer, buffer_exhausted_policy);
|
|
}
|
|
|
|
std::unique_ptr<TraceWriter> SharedMemoryArbiterImpl::CreateStartupTraceWriter(
|
|
uint16_t target_buffer_reservation_id) {
|
|
PERFETTO_CHECK(!initially_bound_);
|
|
return CreateTraceWriterInternal(
|
|
MakeTargetBufferIdForReservation(target_buffer_reservation_id),
|
|
BufferExhaustedPolicy::kDrop);
|
|
}
|
|
|
|
void SharedMemoryArbiterImpl::BindToProducerEndpoint(
|
|
TracingService::ProducerEndpoint* producer_endpoint,
|
|
base::TaskRunner* task_runner) {
|
|
PERFETTO_DCHECK(producer_endpoint && task_runner);
|
|
PERFETTO_DCHECK(task_runner->RunsTasksOnCurrentThread());
|
|
PERFETTO_CHECK(!initially_bound_);
|
|
|
|
bool should_flush = false;
|
|
std::function<void()> flush_callback;
|
|
{
|
|
std::lock_guard<std::mutex> scoped_lock(lock_);
|
|
PERFETTO_CHECK(!fully_bound_);
|
|
PERFETTO_CHECK(!producer_endpoint_ && !task_runner_);
|
|
|
|
producer_endpoint_ = producer_endpoint;
|
|
task_runner_ = task_runner;
|
|
|
|
// Now that we're bound to a task runner, also reset the WeakPtrFactory to
|
|
// it. Because this code runs on the task runner, the factory's weak
|
|
// pointers will be valid on it.
|
|
weak_ptr_factory_.Reset(this);
|
|
|
|
// All writers registered so far should be startup trace writers, since
|
|
// the producer cannot feasibly know the target buffer for any future
|
|
// session yet.
|
|
for (const auto& entry : pending_writers_) {
|
|
PERFETTO_CHECK(IsReservationTargetBufferId(entry.second));
|
|
}
|
|
|
|
// If all buffer reservations are bound, we can flush pending commits.
|
|
if (UpdateFullyBoundLocked()) {
|
|
should_flush = true;
|
|
flush_callback = TakePendingFlushCallbacksLocked();
|
|
}
|
|
} // scoped_lock
|
|
|
|
// Attempt to flush any pending commits (and run pending flush callbacks). If
|
|
// there are none, this will have no effect. If we ended up in a race that
|
|
// changed |fully_bound_| back to false, the commit will happen once we become
|
|
// |fully_bound_| again.
|
|
if (should_flush)
|
|
FlushPendingCommitDataRequests(flush_callback);
|
|
}
|
|
|
|
void SharedMemoryArbiterImpl::BindStartupTargetBuffer(
|
|
uint16_t target_buffer_reservation_id,
|
|
BufferID target_buffer_id) {
|
|
PERFETTO_DCHECK(target_buffer_id > 0);
|
|
PERFETTO_CHECK(!initially_bound_);
|
|
|
|
std::unique_lock<std::mutex> scoped_lock(lock_);
|
|
|
|
// We should already be bound to an endpoint, but not fully bound.
|
|
PERFETTO_CHECK(!fully_bound_);
|
|
PERFETTO_CHECK(producer_endpoint_);
|
|
PERFETTO_CHECK(task_runner_);
|
|
PERFETTO_CHECK(task_runner_->RunsTasksOnCurrentThread());
|
|
|
|
BindStartupTargetBufferImpl(std::move(scoped_lock),
|
|
target_buffer_reservation_id, target_buffer_id);
|
|
}
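// Illustrative sketch (comments only, not part of the generated sources): how
// the startup-tracing calls defined in this file are typically sequenced. The
// writer is created against a reservation id before any service connection
// exists, and the reservation is resolved once the session's real BufferID is
// known. |arbiter|, |producer_endpoint|, |task_runner| and |buffer_id| are
// assumptions for the example.
//
//   std::unique_ptr<perfetto::TraceWriter> writer =
//       arbiter->CreateStartupTraceWriter(/*target_buffer_reservation_id=*/1);
//   {
//     auto packet = writer->NewTracePacket();
//     packet->set_timestamp(42);  // Buffered locally until the buffer binds.
//   }
//
//   // Later, once the producer endpoint is connected and the service has
//   // assigned a concrete buffer to the session:
//   arbiter->BindToProducerEndpoint(producer_endpoint, task_runner);
//   arbiter->BindStartupTargetBuffer(/*target_buffer_reservation_id=*/1,
//                                    buffer_id);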
|
|
|
|
void SharedMemoryArbiterImpl::AbortStartupTracingForReservation(
|
|
uint16_t target_buffer_reservation_id) {
|
|
PERFETTO_CHECK(!initially_bound_);
|
|
|
|
std::unique_lock<std::mutex> scoped_lock(lock_);
|
|
|
|
// If we are already bound to an arbiter, we may need to flush after aborting
|
|
// the session, and thus should be running on the arbiter's task runner.
|
|
if (task_runner_ && !task_runner_->RunsTasksOnCurrentThread()) {
|
|
// We shouldn't post tasks while locked.
|
|
auto* task_runner = task_runner_;
|
|
scoped_lock.unlock();
|
|
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner->PostTask([weak_this, target_buffer_reservation_id]() {
|
|
if (!weak_this)
|
|
return;
|
|
weak_this->AbortStartupTracingForReservation(
|
|
target_buffer_reservation_id);
|
|
});
|
|
return;
|
|
}
|
|
|
|
PERFETTO_CHECK(!fully_bound_);
|
|
|
|
// Bind the target buffer reservation to an invalid buffer (ID 0), so that
|
|
// existing commits, as well as future commits (of currently acquired chunks),
|
|
// will be released as free by the service but otherwise ignored (i.e.
|
|
// not copied into any valid target buffer).
|
|
BindStartupTargetBufferImpl(std::move(scoped_lock),
|
|
target_buffer_reservation_id,
|
|
/*target_buffer_id=*/kInvalidBufferId);
|
|
}
|
|
|
|
void SharedMemoryArbiterImpl::BindStartupTargetBufferImpl(
|
|
std::unique_lock<std::mutex> scoped_lock,
|
|
uint16_t target_buffer_reservation_id,
|
|
BufferID target_buffer_id) {
|
|
// We should already be bound to an endpoint if the target buffer is valid.
|
|
PERFETTO_DCHECK((producer_endpoint_ && task_runner_) ||
|
|
target_buffer_id == kInvalidBufferId);
|
|
|
|
MaybeUnboundBufferID reserved_id =
|
|
MakeTargetBufferIdForReservation(target_buffer_reservation_id);
|
|
|
|
bool should_flush = false;
|
|
std::function<void()> flush_callback;
|
|
std::vector<std::pair<WriterID, BufferID>> writers_to_register;
|
|
|
|
TargetBufferReservation& reservation =
|
|
target_buffer_reservations_[reserved_id];
|
|
PERFETTO_CHECK(!reservation.resolved);
|
|
reservation.resolved = true;
|
|
reservation.target_buffer = target_buffer_id;
|
|
|
|
// Collect trace writers associated with the reservation.
|
|
for (auto it = pending_writers_.begin(); it != pending_writers_.end();) {
|
|
if (it->second == reserved_id) {
|
|
// No need to register writers that have an invalid target buffer.
|
|
if (target_buffer_id != kInvalidBufferId) {
|
|
writers_to_register.push_back(
|
|
std::make_pair(it->first, target_buffer_id));
|
|
}
|
|
it = pending_writers_.erase(it);
|
|
} else {
|
|
it++;
|
|
}
|
|
}
|
|
|
|
// If all buffer reservations are bound, we can flush pending commits.
|
|
if (UpdateFullyBoundLocked()) {
|
|
should_flush = true;
|
|
flush_callback = TakePendingFlushCallbacksLocked();
|
|
}
|
|
|
|
scoped_lock.unlock();
|
|
|
|
// Register any newly bound trace writers with the service.
|
|
for (const auto& writer_and_target_buffer : writers_to_register) {
|
|
producer_endpoint_->RegisterTraceWriter(writer_and_target_buffer.first,
|
|
writer_and_target_buffer.second);
|
|
}
|
|
|
|
// Attempt to flush any pending commits (and run pending flush callbacks). If
|
|
// there are none, this will have no effect. If we ended up in a race that
|
|
// changed |fully_bound_| back to false, the commit will happen once we become
|
|
// |fully_bound_| again.
|
|
if (should_flush)
|
|
FlushPendingCommitDataRequests(flush_callback);
|
|
}
|
|
|
|
std::function<void()>
|
|
SharedMemoryArbiterImpl::TakePendingFlushCallbacksLocked() {
|
|
if (pending_flush_callbacks_.empty())
|
|
return std::function<void()>();
|
|
|
|
std::vector<std::function<void()>> pending_flush_callbacks;
|
|
pending_flush_callbacks.swap(pending_flush_callbacks_);
|
|
// Capture the callback list into the lambda by copy.
|
|
return [pending_flush_callbacks]() {
|
|
for (auto& callback : pending_flush_callbacks)
|
|
callback();
|
|
};
|
|
}
|
|
|
|
void SharedMemoryArbiterImpl::NotifyFlushComplete(FlushRequestID req_id) {
|
|
base::TaskRunner* task_runner_to_commit_on = nullptr;
|
|
|
|
{
|
|
std::lock_guard<std::mutex> scoped_lock(lock_);
|
|
// If a commit_data_req_ exists it means that somebody else already posted a
|
|
// FlushPendingCommitDataRequests() task.
|
|
if (!commit_data_req_) {
|
|
commit_data_req_.reset(new CommitDataRequest());
|
|
|
|
// Flushing the commit is only supported while we're |fully_bound_|. If we
|
|
// aren't, we'll flush when |fully_bound_| is updated.
|
|
if (fully_bound_)
|
|
task_runner_to_commit_on = task_runner_;
|
|
} else {
|
|
// If there is another request queued that also contains a reply
|
|
// to a flush request, reply with the highest id.
|
|
req_id = std::max(req_id, commit_data_req_->flush_request_id());
|
|
}
|
|
commit_data_req_->set_flush_request_id(req_id);
|
|
} // scoped_lock
|
|
|
|
// We shouldn't post tasks while locked. |task_runner_to_commit_on|
|
|
// remains valid after unlocking, because |task_runner_| is never reset.
|
|
if (task_runner_to_commit_on) {
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_to_commit_on->PostTask([weak_this] {
|
|
if (weak_this)
|
|
weak_this->FlushPendingCommitDataRequests();
|
|
});
|
|
}
|
|
}
|
|
|
|
std::unique_ptr<TraceWriter> SharedMemoryArbiterImpl::CreateTraceWriterInternal(
|
|
MaybeUnboundBufferID target_buffer,
|
|
BufferExhaustedPolicy buffer_exhausted_policy) {
|
|
WriterID id;
|
|
base::TaskRunner* task_runner_to_register_on = nullptr;
|
|
|
|
{
|
|
std::lock_guard<std::mutex> scoped_lock(lock_);
|
|
id = active_writer_ids_.Allocate();
|
|
|
|
if (!id)
|
|
return std::unique_ptr<TraceWriter>(new NullTraceWriter());
|
|
|
|
PERFETTO_DCHECK(!pending_writers_.count(id));
|
|
|
|
if (IsReservationTargetBufferId(target_buffer)) {
|
|
// If the reservation is new, mark it as unbound in
|
|
// |target_buffer_reservations_|. Otherwise, if the reservation was
|
|
// already bound, choose the bound buffer ID now.
|
|
auto it_and_inserted = target_buffer_reservations_.insert(
|
|
{target_buffer, TargetBufferReservation()});
|
|
if (it_and_inserted.first->second.resolved)
|
|
target_buffer = it_and_inserted.first->second.target_buffer;
|
|
}
|
|
|
|
if (IsReservationTargetBufferId(target_buffer)) {
|
|
// The arbiter and/or startup buffer reservations are not bound yet, so
|
|
// buffer the registration of the writer until after we're bound.
|
|
pending_writers_[id] = target_buffer;
|
|
|
|
// Mark the arbiter as not fully bound, since we now have at least one
|
|
// unbound trace writer / target buffer reservation.
|
|
fully_bound_ = false;
|
|
} else if (target_buffer != kInvalidBufferId) {
|
|
// Trace writer is bound, so arbiter should be bound to an endpoint, too.
|
|
PERFETTO_CHECK(producer_endpoint_ && task_runner_);
|
|
task_runner_to_register_on = task_runner_;
|
|
}
|
|
} // scoped_lock
|
|
|
|
// We shouldn't post tasks while locked. |task_runner_to_register_on|
|
|
// remains valid after unlocking, because |task_runner_| is never reset.
|
|
if (task_runner_to_register_on) {
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_to_register_on->PostTask([weak_this, id, target_buffer] {
|
|
if (weak_this)
|
|
weak_this->producer_endpoint_->RegisterTraceWriter(id, target_buffer);
|
|
});
|
|
}
|
|
|
|
return std::unique_ptr<TraceWriter>(
|
|
new TraceWriterImpl(this, id, target_buffer, buffer_exhausted_policy));
|
|
}
|
|
|
|
void SharedMemoryArbiterImpl::ReleaseWriterID(WriterID id) {
|
|
base::TaskRunner* task_runner = nullptr;
|
|
{
|
|
std::lock_guard<std::mutex> scoped_lock(lock_);
|
|
active_writer_ids_.Free(id);
|
|
|
|
auto it = pending_writers_.find(id);
|
|
if (it != pending_writers_.end()) {
|
|
// Writer hasn't been bound yet and thus also not yet registered with the
|
|
// service.
|
|
pending_writers_.erase(it);
|
|
return;
|
|
}
|
|
|
|
// A trace writer from an aborted session may be destroyed before the
|
|
// arbiter is bound to a task runner. In that case, it was never registered
|
|
// with the service.
|
|
if (!task_runner_)
|
|
return;
|
|
|
|
task_runner = task_runner_;
|
|
} // scoped_lock
|
|
|
|
// We shouldn't post tasks while locked. |task_runner| remains valid after
|
|
// unlocking, because |task_runner_| is never reset.
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner->PostTask([weak_this, id] {
|
|
if (weak_this)
|
|
weak_this->producer_endpoint_->UnregisterTraceWriter(id);
|
|
});
|
|
}
|
|
|
|
bool SharedMemoryArbiterImpl::ReplaceCommitPlaceholderBufferIdsLocked() {
|
|
if (!commit_data_req_)
|
|
return true;
|
|
|
|
bool all_placeholders_replaced = true;
|
|
for (auto& chunk : *commit_data_req_->mutable_chunks_to_move()) {
|
|
if (!IsReservationTargetBufferId(chunk.target_buffer()))
|
|
continue;
|
|
const auto it = target_buffer_reservations_.find(chunk.target_buffer());
|
|
PERFETTO_DCHECK(it != target_buffer_reservations_.end());
|
|
if (!it->second.resolved) {
|
|
all_placeholders_replaced = false;
|
|
continue;
|
|
}
|
|
chunk.set_target_buffer(it->second.target_buffer);
|
|
}
|
|
for (auto& chunk : *commit_data_req_->mutable_chunks_to_patch()) {
|
|
if (!IsReservationTargetBufferId(chunk.target_buffer()))
|
|
continue;
|
|
const auto it = target_buffer_reservations_.find(chunk.target_buffer());
|
|
PERFETTO_DCHECK(it != target_buffer_reservations_.end());
|
|
if (!it->second.resolved) {
|
|
all_placeholders_replaced = false;
|
|
continue;
|
|
}
|
|
chunk.set_target_buffer(it->second.target_buffer);
|
|
}
|
|
return all_placeholders_replaced;
|
|
}
|
|
|
|
bool SharedMemoryArbiterImpl::UpdateFullyBoundLocked() {
|
|
if (!producer_endpoint_) {
|
|
PERFETTO_DCHECK(!fully_bound_);
|
|
return false;
|
|
}
|
|
// We're fully bound if all target buffer reservations have a valid associated
|
|
// BufferID.
|
|
fully_bound_ = std::none_of(
|
|
target_buffer_reservations_.begin(), target_buffer_reservations_.end(),
|
|
[](std::pair<MaybeUnboundBufferID, TargetBufferReservation> entry) {
|
|
return !entry.second.resolved;
|
|
});
|
|
return fully_bound_;
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/trace_packet.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/trace_packet.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/slice.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SLICE_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SLICE_H_
|
|
|
|
#include <stddef.h>
|
|
#include <string.h>
|
|
|
|
#include <memory>
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// A simple wrapper around a virtually contiguous memory range that contains a
|
|
// TracePacket, or just a portion of it.
|
|
struct Slice {
|
|
Slice() : start(nullptr), size(0) {}
|
|
Slice(const void* st, size_t sz) : start(st), size(sz) {}
|
|
Slice(Slice&& other) noexcept = default;
|
|
|
|
// Create a Slice which owns |size| bytes of memory.
|
|
static Slice Allocate(size_t size) {
|
|
Slice slice;
|
|
slice.own_data_.reset(new uint8_t[size]);
|
|
slice.start = &slice.own_data_[0];
|
|
slice.size = size;
|
|
return slice;
|
|
}
|
|
|
|
uint8_t* own_data() {
|
|
PERFETTO_DCHECK(own_data_);
|
|
return own_data_.get();
|
|
}
|
|
|
|
const void* start;
|
|
size_t size;
|
|
|
|
private:
|
|
Slice(const Slice&) = delete;
|
|
void operator=(const Slice&) = delete;
|
|
|
|
std::unique_ptr<uint8_t[]> own_data_;
|
|
};
|
|
|
|
// TODO(primiano): most TracePacket(s) fit in a slice or two. We need something
|
|
// a bit more clever here that has inline capacity for 2 slices and then uses a
|
|
// std::forward_list or a std::vector for the less likely cases.
|
|
using Slices = std::vector<Slice>;
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_SLICE_H_
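// Illustrative sketch (comments only): the two common ways a Slice is built.
// Allocate() makes the slice own its bytes, while the (pointer, size)
// constructor merely references caller-owned memory that must outlive the
// slice. Identifiers below are assumptions for the example.
//
//   perfetto::Slice owned = perfetto::Slice::Allocate(64);
//   memset(owned.own_data(), 0, owned.size);
//
//   static const char kPayload[] = "not owned by the slice";
//   perfetto::Slice borrowed(kPayload, sizeof(kPayload));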
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_PACKET_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_PACKET_H_
|
|
|
|
#include <stddef.h>
|
|
#include <memory>
|
|
#include <tuple>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// A wrapper around a byte buffer that contains a protobuf-encoded TracePacket
|
|
// (see trace_packet.proto). The TracePacket is decoded only if the Consumer
|
|
// requests that. This is to allow Consumer(s) to just stream the packet over
|
|
// the network or save it to a file without wasting time decoding it and without
|
|
// needing to depend on libprotobuf or the trace_packet.pb.h header.
|
|
// If the packets are saved / streamed and not just consumed locally, consumers
|
|
// should make sure to preserve the unknown fields in the proto. A consumer, in
|
|
// fact, might have an older version of the .proto than the producer.
|
|
class PERFETTO_EXPORT TracePacket {
|
|
public:
|
|
using const_iterator = Slices::const_iterator;
|
|
|
|
// The field id of protos::Trace::packet, static_assert()-ed in the unittest.
|
|
static constexpr uint32_t kPacketFieldNumber = 1;
|
|
|
|
// Maximum size of the preamble returned by GetProtoPreamble().
|
|
static constexpr size_t kMaxPreambleBytes = 8;
|
|
|
|
TracePacket();
|
|
~TracePacket();
|
|
TracePacket(TracePacket&&) noexcept;
|
|
TracePacket& operator=(TracePacket&&);
|
|
|
|
// Accesses all the raw slices in the packet, for saving them to file/network.
|
|
const Slices& slices() const { return slices_; }
|
|
|
|
// Mutator, used only by the service and tests.
|
|
void AddSlice(Slice);
|
|
|
|
// Does not copy / take ownership of the memory of the slice. The TracePacket
|
|
// will be valid only as long as the original buffer is valid.
|
|
void AddSlice(const void* start, size_t size);
|
|
|
|
// Total size of all slices.
|
|
size_t size() const { return size_; }
|
|
|
|
// Generates a protobuf preamble suitable to represent this packet as a
|
|
// repeated field within a root trace.proto message.
|
|
// Returns a pointer to a buffer, owned by this class, containing the preamble
|
|
// and its size.
|
|
std::tuple<char*, size_t> GetProtoPreamble();
|
|
|
|
// Returns the raw protobuf bytes of the slices, all stitched together into
|
|
// a string. Only for testing.
|
|
std::string GetRawBytesForTesting();
|
|
|
|
private:
|
|
TracePacket(const TracePacket&) = delete;
|
|
TracePacket& operator=(const TracePacket&) = delete;
|
|
|
|
Slices slices_; // Not owned.
|
|
size_t size_ = 0; // SUM(slice.size for slice in slices_).
|
|
char preamble_[kMaxPreambleBytes]; // Deliberately not initialized.
|
|
|
|
// Remember to update the move operators and their unittest if adding new
|
|
// fields. ConsumerIPCClientImpl::OnReadBuffersResponse() relies on
|
|
// std::move(TracePacket) to clear up the moved-from instance.
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_PACKET_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
|
|
TracePacket::TracePacket() = default;
|
|
TracePacket::~TracePacket() = default;
|
|
|
|
TracePacket::TracePacket(TracePacket&& other) noexcept {
|
|
*this = std::move(other);
|
|
}
|
|
|
|
TracePacket& TracePacket::operator=(TracePacket&& other) {
|
|
slices_ = std::move(other.slices_);
|
|
other.slices_.clear();
|
|
size_ = other.size_;
|
|
other.size_ = 0;
|
|
return *this;
|
|
}
|
|
|
|
void TracePacket::AddSlice(Slice slice) {
|
|
size_ += slice.size;
|
|
slices_.push_back(std::move(slice));
|
|
}
|
|
|
|
void TracePacket::AddSlice(const void* start, size_t size) {
|
|
size_ += size;
|
|
slices_.emplace_back(start, size);
|
|
}
|
|
|
|
std::tuple<char*, size_t> TracePacket::GetProtoPreamble() {
|
|
using protozero::proto_utils::MakeTagLengthDelimited;
|
|
using protozero::proto_utils::WriteVarInt;
|
|
uint8_t* ptr = reinterpret_cast<uint8_t*>(&preamble_[0]);
|
|
|
|
constexpr uint8_t tag = MakeTagLengthDelimited(kPacketFieldNumber);
|
|
static_assert(tag < 0x80, "TracePacket tag should fit in one byte");
|
|
*(ptr++) = tag;
|
|
|
|
ptr = WriteVarInt(size(), ptr);
|
|
size_t preamble_size = reinterpret_cast<uintptr_t>(ptr) -
|
|
reinterpret_cast<uintptr_t>(&preamble_[0]);
|
|
PERFETTO_DCHECK(preamble_size <= sizeof(preamble_));
|
|
return std::make_tuple(&preamble_[0], preamble_size);
|
|
}
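// Illustrative sketch (comments only): how a consumer can serialize a batch of
// TracePackets into a valid trace file using GetProtoPreamble() and slices().
// The preamble encodes the "repeated TracePacket packet = 1" field header of
// the root Trace message, so writing preamble + slices per packet yields a
// parseable trace. |packets| and |out| (a std::ofstream) are assumptions.
//
//   for (perfetto::TracePacket& packet : packets) {
//     char* preamble;
//     size_t preamble_size;
//     std::tie(preamble, preamble_size) = packet.GetProtoPreamble();
//     out.write(preamble, static_cast<std::streamsize>(preamble_size));
//     for (const perfetto::Slice& slice : packet.slices()) {
//       out.write(reinterpret_cast<const char*>(slice.start),
//                 static_cast<std::streamsize>(slice.size));
//     }
//   }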
|
|
|
|
std::string TracePacket::GetRawBytesForTesting() {
|
|
std::string data;
|
|
data.resize(size());
|
|
size_t pos = 0;
|
|
for (const Slice& slice : slices()) {
|
|
PERFETTO_CHECK(pos + slice.size <= data.size());
|
|
memcpy(&data[pos], slice.start, slice.size);
|
|
pos += slice.size;
|
|
}
|
|
return data;
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/trace_writer_impl.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/trace_writer_impl.h"
|
|
|
|
#include <string.h>
|
|
|
|
#include <algorithm>
|
|
#include <type_traits>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_annotations.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/shared_memory_arbiter_impl.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
|
|
using protozero::proto_utils::kMessageLengthFieldSize;
|
|
using protozero::proto_utils::WriteRedundantVarInt;
|
|
using ChunkHeader = perfetto::SharedMemoryABI::ChunkHeader;
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
constexpr size_t kPacketHeaderSize = SharedMemoryABI::kPacketHeaderSize;
|
|
uint8_t g_garbage_chunk[1024];
|
|
} // namespace
|
|
|
|
TraceWriterImpl::TraceWriterImpl(SharedMemoryArbiterImpl* shmem_arbiter,
|
|
WriterID id,
|
|
MaybeUnboundBufferID target_buffer,
|
|
BufferExhaustedPolicy buffer_exhausted_policy)
|
|
: shmem_arbiter_(shmem_arbiter),
|
|
id_(id),
|
|
target_buffer_(target_buffer),
|
|
buffer_exhausted_policy_(buffer_exhausted_policy),
|
|
protobuf_stream_writer_(this),
|
|
process_id_(base::GetProcessId()) {
|
|
// TODO(primiano): we could handle the case of running out of TraceWriterID(s)
|
|
// more gracefully and always return a no-op TracePacket in NewTracePacket().
|
|
PERFETTO_CHECK(id_ != 0);
|
|
|
|
cur_packet_.reset(new protozero::RootMessage<protos::pbzero::TracePacket>());
|
|
cur_packet_->Finalize(); // To avoid the DCHECK in NewTracePacket().
|
|
}
|
|
|
|
TraceWriterImpl::~TraceWriterImpl() {
|
|
if (cur_chunk_.is_valid()) {
|
|
cur_packet_->Finalize();
|
|
Flush();
|
|
}
|
|
shmem_arbiter_->ReleaseWriterID(id_);
|
|
}
|
|
|
|
void TraceWriterImpl::Flush(std::function<void()> callback) {
|
|
// Flush() cannot be called in the middle of a TracePacket.
|
|
PERFETTO_CHECK(cur_packet_->is_finalized());
|
|
|
|
if (cur_chunk_.is_valid()) {
|
|
shmem_arbiter_->ReturnCompletedChunk(std::move(cur_chunk_), target_buffer_,
|
|
&patch_list_);
|
|
} else {
|
|
PERFETTO_DCHECK(patch_list_.empty());
|
|
}
|
|
// Always issue the Flush request, even if there is nothing to flush, just
|
|
// for the sake of getting the callback posted back.
|
|
shmem_arbiter_->FlushPendingCommitDataRequests(callback);
|
|
protobuf_stream_writer_.Reset({nullptr, nullptr});
|
|
|
|
// |last_packet_size_field_| might have pointed into the chunk we returned.
|
|
last_packet_size_field_ = nullptr;
|
|
}
|
|
|
|
TraceWriterImpl::TracePacketHandle TraceWriterImpl::NewTracePacket() {
|
|
// If we hit this, the caller is calling NewTracePacket() without having
|
|
// finalized the previous packet.
|
|
PERFETTO_CHECK(cur_packet_->is_finalized());
|
|
// If we hit this, this trace writer was created in a different process. This
|
|
// likely means that the process forked while tracing was active, and the
|
|
// forked child process tried to emit a trace event. This is not supported, as
|
|
// it would lead to two processes writing to the same tracing SMB.
|
|
PERFETTO_DCHECK(process_id_ == base::GetProcessId());
|
|
|
|
fragmenting_packet_ = false;
|
|
|
|
// Reserve space for the size of the message. Note: this call might re-enter
|
|
// into this class invoking GetNewBuffer() if there isn't enough space or if
|
|
// this is the very first call to NewTracePacket().
|
|
static_assert(kPacketHeaderSize == kMessageLengthFieldSize,
|
|
"The packet header must match the Message header size");
|
|
|
|
bool was_dropping_packets = drop_packets_;
|
|
|
|
// It doesn't make sense to begin a packet that is going to fragment
|
|
// immediately after (8 is just an arbitrary estimate of the minimum size of
|
|
// a realistic packet).
|
|
bool chunk_too_full =
|
|
protobuf_stream_writer_.bytes_available() < kPacketHeaderSize + 8;
|
|
if (chunk_too_full || reached_max_packets_per_chunk_ ||
|
|
retry_new_chunk_after_packet_) {
|
|
protobuf_stream_writer_.Reset(GetNewBuffer());
|
|
}
|
|
|
|
// Send any completed patches to the service to facilitate trace data
|
|
// recovery by the service. This should only happen when we're completing
|
|
// the first packet in a chunk which was a continuation from the previous
|
|
// chunk, i.e. at most once per chunk.
|
|
if (!patch_list_.empty() && patch_list_.front().is_patched()) {
|
|
shmem_arbiter_->SendPatches(id_, target_buffer_, &patch_list_);
|
|
}
|
|
|
|
cur_packet_->Reset(&protobuf_stream_writer_);
|
|
uint8_t* header = protobuf_stream_writer_.ReserveBytes(kPacketHeaderSize);
|
|
memset(header, 0, kPacketHeaderSize);
|
|
cur_packet_->set_size_field(header);
|
|
last_packet_size_field_ = header;
|
|
|
|
TracePacketHandle handle(cur_packet_.get());
|
|
cur_fragment_start_ = protobuf_stream_writer_.write_ptr();
|
|
fragmenting_packet_ = true;
|
|
|
|
if (PERFETTO_LIKELY(!drop_packets_)) {
|
|
uint16_t new_packet_count = cur_chunk_.IncrementPacketCount();
|
|
reached_max_packets_per_chunk_ =
|
|
new_packet_count == ChunkHeader::Packets::kMaxCount;
|
|
|
|
if (PERFETTO_UNLIKELY(was_dropping_packets)) {
|
|
// We succeeded in getting a new chunk from the SMB after we entered
|
|
// drop_packets_ mode. Record a marker into the new packet to indicate the
|
|
// data loss.
|
|
cur_packet_->set_previous_packet_dropped(true);
|
|
}
|
|
}
|
|
|
|
return handle;
|
|
}
|
|
|
|
// Called by the Message. We can get here in two cases:
|
|
// 1. In the middle of writing a Message,
|
|
// when |fragmenting_packet_| == true. In this case we want to update the
|
|
// chunk header with a partial packet and start a new partial packet in the
|
|
// new chunk.
|
|
// 2. While calling ReserveBytes() for the packet header in NewTracePacket().
|
|
// In this case |fragmenting_packet_| == false and we just want a new chunk
|
|
// without creating any fragments.
|
|
protozero::ContiguousMemoryRange TraceWriterImpl::GetNewBuffer() {
|
|
if (fragmenting_packet_ && drop_packets_) {
|
|
// We can't write the remaining data of the fragmenting packet to a new
|
|
// chunk, because we have already lost some of its data in the garbage
|
|
// chunk. Thus, we will wrap around in the garbage chunk, wait until the
|
|
// current packet is completed, and then attempt to get a new chunk from
|
|
// the SMB again. Instead, if |drop_packets_| is true and
|
|
// |fragmenting_packet_| is false, we try to acquire a valid chunk because
|
|
// the SMB exhaustion might be resolved.
|
|
retry_new_chunk_after_packet_ = true;
|
|
return protozero::ContiguousMemoryRange{
|
|
&g_garbage_chunk[0], &g_garbage_chunk[0] + sizeof(g_garbage_chunk)};
|
|
}
|
|
|
|
// Attempt to grab the next chunk before finalizing the current one, so that
|
|
// we know whether we need to start dropping packets before writing the
|
|
// current packet fragment's header.
|
|
ChunkHeader::Packets packets = {};
|
|
if (fragmenting_packet_) {
|
|
packets.count = 1;
|
|
packets.flags = ChunkHeader::kFirstPacketContinuesFromPrevChunk;
|
|
}
|
|
|
|
// The memory order of the stores below doesn't really matter. This |header|
|
|
// is just a local temporary object. The GetNewChunk() call below will copy it
|
|
// into the shared buffer with the proper barriers.
|
|
ChunkHeader header = {};
|
|
header.writer_id.store(id_, std::memory_order_relaxed);
|
|
header.chunk_id.store(next_chunk_id_, std::memory_order_relaxed);
|
|
header.packets.store(packets, std::memory_order_relaxed);
|
|
|
|
SharedMemoryABI::Chunk new_chunk =
|
|
shmem_arbiter_->GetNewChunk(header, buffer_exhausted_policy_);
|
|
if (!new_chunk.is_valid()) {
|
|
// Shared memory buffer exhausted, switch into |drop_packets_| mode. We'll
|
|
// drop data until the garbage chunk has been filled once and then retry.
|
|
|
|
// If we started a packet in one of the previous (valid) chunks, we need to
|
|
// tell the service to discard it.
|
|
if (fragmenting_packet_) {
|
|
// We can only end up here if the previous chunk was a valid chunk,
|
|
// because we never try to acquire a new chunk in |drop_packets_| mode
|
|
// while fragmenting.
|
|
PERFETTO_DCHECK(!drop_packets_);
|
|
|
|
// Backfill the last fragment's header with an invalid size (too large),
|
|
// so that the service's TraceBuffer throws out the incomplete packet.
|
|
// It'll restart reading from the next chunk we submit.
|
|
WriteRedundantVarInt(SharedMemoryABI::kPacketSizeDropPacket,
|
|
cur_packet_->size_field());
|
|
|
|
// Reset the size field, since we should not write the current packet's
|
|
// size anymore after this.
|
|
cur_packet_->set_size_field(nullptr);
|
|
|
|
// We don't set kLastPacketContinuesOnNextChunk or kChunkNeedsPatching on
|
|
// the last chunk, because its last fragment will be discarded anyway.
|
|
// However, the current packet fragment points to a valid |cur_chunk_| and
|
|
// may have non-finalized nested messages which will continue in the
|
|
// garbage chunk and currently still point into |cur_chunk_|. As we are
|
|
// about to return |cur_chunk_|, we need to invalidate the size fields of
|
|
// those nested messages. Normally we move them in the |patch_list_| (see
|
|
// below) but in this case, it doesn't make sense to send patches for a
|
|
// fragment that will be discarded for sure. Thus, we clean up any size
|
|
// field references into |cur_chunk_|.
|
|
for (auto* nested_msg = cur_packet_->nested_message(); nested_msg;
|
|
nested_msg = nested_msg->nested_message()) {
|
|
uint8_t* const cur_hdr = nested_msg->size_field();
|
|
|
|
// If this is false the protozero Message has already been instructed to
|
|
// write, upon Finalize(), its size into the patch list.
|
|
bool size_field_points_within_chunk =
|
|
cur_hdr >= cur_chunk_.payload_begin() &&
|
|
cur_hdr + kMessageLengthFieldSize <= cur_chunk_.end();
|
|
|
|
if (size_field_points_within_chunk)
|
|
nested_msg->set_size_field(nullptr);
|
|
}
|
|
} else if (!drop_packets_ && last_packet_size_field_) {
|
|
// If we weren't dropping packets before, we should indicate to the
|
|
// service that we're about to lose data. We do this by invalidating the
|
|
// size of the last packet in |cur_chunk_|. The service will record
|
|
// statistics about packets with kPacketSizeDropPacket size.
|
|
PERFETTO_DCHECK(cur_packet_->is_finalized());
|
|
PERFETTO_DCHECK(cur_chunk_.is_valid());
|
|
|
|
// |last_packet_size_field_| should point within |cur_chunk_|'s payload.
|
|
PERFETTO_DCHECK(last_packet_size_field_ >= cur_chunk_.payload_begin() &&
|
|
last_packet_size_field_ + kMessageLengthFieldSize <=
|
|
cur_chunk_.end());
|
|
|
|
WriteRedundantVarInt(SharedMemoryABI::kPacketSizeDropPacket,
|
|
last_packet_size_field_);
|
|
}
|
|
|
|
if (cur_chunk_.is_valid()) {
|
|
shmem_arbiter_->ReturnCompletedChunk(std::move(cur_chunk_),
|
|
target_buffer_, &patch_list_);
|
|
}
|
|
|
|
drop_packets_ = true;
|
|
cur_chunk_ = SharedMemoryABI::Chunk(); // Reset to an invalid chunk.
|
|
reached_max_packets_per_chunk_ = false;
|
|
retry_new_chunk_after_packet_ = false;
|
|
last_packet_size_field_ = nullptr;
|
|
|
|
PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(&g_garbage_chunk,
|
|
sizeof(g_garbage_chunk),
|
|
"nobody reads the garbage chunk")
|
|
return protozero::ContiguousMemoryRange{
|
|
&g_garbage_chunk[0], &g_garbage_chunk[0] + sizeof(g_garbage_chunk)};
|
|
} // if (!new_chunk.is_valid())
|
|
|
|
PERFETTO_DCHECK(new_chunk.is_valid());
|
|
|
|
if (fragmenting_packet_) {
|
|
// We should not be fragmenting a packet after we exited drop_packets_ mode,
|
|
// because we only retry to get a new chunk when a fresh packet is started.
|
|
PERFETTO_DCHECK(!drop_packets_);
|
|
|
|
uint8_t* const wptr = protobuf_stream_writer_.write_ptr();
|
|
PERFETTO_DCHECK(wptr >= cur_fragment_start_);
|
|
uint32_t partial_size = static_cast<uint32_t>(wptr - cur_fragment_start_);
|
|
PERFETTO_DCHECK(partial_size < cur_chunk_.size());
|
|
|
|
// Backfill the packet header with the fragment size.
|
|
PERFETTO_DCHECK(partial_size > 0);
|
|
cur_packet_->inc_size_already_written(partial_size);
|
|
cur_chunk_.SetFlag(ChunkHeader::kLastPacketContinuesOnNextChunk);
|
|
WriteRedundantVarInt(partial_size, cur_packet_->size_field());
|
|
|
|
// Descend in the stack of non-finalized nested submessages (if any) and
|
|
// detour their |size_field| into the |patch_list_|. At this point we have
|
|
// to release the chunk and they cannot write anymore into that.
|
|
// TODO(primiano): add tests to cover this logic.
|
|
bool chunk_needs_patching = false;
|
|
for (auto* nested_msg = cur_packet_->nested_message(); nested_msg;
|
|
nested_msg = nested_msg->nested_message()) {
|
|
uint8_t* const cur_hdr = nested_msg->size_field();
|
|
|
|
// If this is false the protozero Message has already been instructed to
|
|
// write, upon Finalize(), its size into the patch list.
|
|
bool size_field_points_within_chunk =
|
|
cur_hdr >= cur_chunk_.payload_begin() &&
|
|
cur_hdr + kMessageLengthFieldSize <= cur_chunk_.end();
|
|
|
|
if (size_field_points_within_chunk) {
|
|
auto offset =
|
|
static_cast<uint16_t>(cur_hdr - cur_chunk_.payload_begin());
|
|
const ChunkID cur_chunk_id =
|
|
cur_chunk_.header()->chunk_id.load(std::memory_order_relaxed);
|
|
Patch* patch = patch_list_.emplace_back(cur_chunk_id, offset);
|
|
nested_msg->set_size_field(&patch->size_field[0]);
|
|
chunk_needs_patching = true;
|
|
} else {
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
// Ensure that the size field of the message points to an element of the
|
|
// patch list.
|
|
auto patch_it = std::find_if(
|
|
patch_list_.begin(), patch_list_.end(),
|
|
[cur_hdr](const Patch& p) { return &p.size_field[0] == cur_hdr; });
|
|
PERFETTO_DCHECK(patch_it != patch_list_.end());
|
|
#endif
|
|
}
|
|
} // for(nested_msg
|
|
|
|
if (chunk_needs_patching)
|
|
cur_chunk_.SetFlag(ChunkHeader::kChunkNeedsPatching);
|
|
} // if(fragmenting_packet)
|
|
|
|
if (cur_chunk_.is_valid()) {
|
|
// ReturnCompletedChunk will consume the first patched entries from
|
|
// |patch_list_| and shrink it.
|
|
shmem_arbiter_->ReturnCompletedChunk(std::move(cur_chunk_), target_buffer_,
|
|
&patch_list_);
|
|
}
|
|
|
|
// Switch to the new chunk.
|
|
drop_packets_ = false;
|
|
reached_max_packets_per_chunk_ = false;
|
|
retry_new_chunk_after_packet_ = false;
|
|
next_chunk_id_++;
|
|
cur_chunk_ = std::move(new_chunk);
|
|
last_packet_size_field_ = nullptr;
|
|
|
|
uint8_t* payload_begin = cur_chunk_.payload_begin();
|
|
if (fragmenting_packet_) {
|
|
cur_packet_->set_size_field(payload_begin);
|
|
last_packet_size_field_ = payload_begin;
|
|
memset(payload_begin, 0, kPacketHeaderSize);
|
|
payload_begin += kPacketHeaderSize;
|
|
cur_fragment_start_ = payload_begin;
|
|
}
|
|
|
|
return protozero::ContiguousMemoryRange{payload_begin, cur_chunk_.end()};
|
|
}
|
|
|
|
WriterID TraceWriterImpl::writer_id() const {
|
|
return id_;
|
|
}
|
|
|
|
// Base class definitions.
|
|
TraceWriter::TraceWriter() = default;
|
|
TraceWriter::~TraceWriter() = default;
|
|
|
|
} // namespace perfetto
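// Illustrative sketch (comments only): the typical write path on top of a
// TraceWriter obtained from the arbiter defined earlier in this amalgamation.
// The handle returned by NewTracePacket() finalizes the packet when it goes
// out of scope; Flush() commits completed chunks back to the service.
// |arbiter| and |buffer_id| are assumptions for the example.
//
//   std::unique_ptr<perfetto::TraceWriter> writer = arbiter->CreateTraceWriter(
//       buffer_id, perfetto::BufferExhaustedPolicy::kDrop);
//   {
//     auto packet = writer->NewTracePacket();
//     packet->set_timestamp(42);
//   }  // Packet finalized here.
//   writer->Flush();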
|
|
// gen_amalgamated begin source: src/tracing/core/virtual_destructors.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/consumer.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/observable_events.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_OBSERVABLE_EVENTS_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_OBSERVABLE_EVENTS_H_
|
|
|
|
// Creates the aliases in the ::perfetto namespace, doing things like:
|
|
// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
|
|
// See comments in forward_decls.h for the historical reasons of this
|
|
// indirection layer.
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_OBSERVABLE_EVENTS_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_CONSUMER_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_CONSUMER_H_
|
|
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
namespace perfetto {
|
|
|
|
class TracePacket;
|
|
|
|
class PERFETTO_EXPORT Consumer {
|
|
public:
|
|
virtual ~Consumer();
|
|
|
|
// Called by Service (or more typically by the transport layer, on behalf of
|
|
// the remote Service), once the Consumer <> Service connection has been
|
|
// established.
|
|
virtual void OnConnect() = 0;
|
|
|
|
// Called by the Service or by the transport layer if the connection with the
|
|
// service drops, either voluntarily (e.g., by destroying the ConsumerEndpoint
|
|
// obtained through Service::ConnectConsumer()) or involuntarily (e.g., if the
|
|
// Service process crashes).
|
|
virtual void OnDisconnect() = 0;
|
|
|
|
// Called by the Service after the tracing session has ended. This can happen
|
|
// for a variety of reasons:
|
|
// - The consumer explicitly called DisableTracing()
|
|
// - The TraceConfig's |duration_ms| has been reached.
|
|
// - The TraceConfig's |max_file_size_bytes| has been reached.
|
|
// - An error occurred while trying to enable tracing.
|
|
virtual void OnTracingDisabled() = 0;
|
|
|
|
// Called back by the Service (or transport layer) after invoking
|
|
// TracingService::ConsumerEndpoint::ReadBuffers(). This function can be
|
|
// called more than once. Each invocation can carry one or more
|
|
// TracePacket(s). Upon the last call, |has_more| is set to false (i.e.
|
|
// |has_more| is a !EOF).
|
|
virtual void OnTraceData(std::vector<TracePacket>, bool has_more) = 0;
|
|
|
|
// Called back by the Service (or transport layer) after invoking
|
|
// TracingService::ConsumerEndpoint::Detach().
|
|
// The consumer can disconnect at this point and the trace session will keep
|
|
// on going. A new consumer can later re-attach passing back the same |key|
|
|
// passed to Detach(), but only if the two requests come from the same uid.
|
|
virtual void OnDetach(bool success) = 0;
|
|
|
|
// Called back by the Service (or transport layer) after invoking
|
|
// TracingService::ConsumerEndpoint::Attach().
|
|
virtual void OnAttach(bool success, const TraceConfig&) = 0;
|
|
|
|
// Called back by the Service (or transport layer) after invoking
|
|
// TracingService::ConsumerEndpoint::GetTraceStats().
|
|
virtual void OnTraceStats(bool success, const TraceStats&) = 0;
|
|
|
|
// Called back by the Service (or transport layer) after invoking
|
|
// TracingService::ConsumerEndpoint::ObserveEvents() whenever one or more
|
|
// ObservableEvents of enabled event types occur.
|
|
virtual void OnObservableEvents(const ObservableEvents&) = 0;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_CONSUMER_H_
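// Illustrative sketch (comments only): a minimal Consumer implementation that
// accumulates packets delivered by ReadBuffers() callbacks. OnTraceData() may
// be invoked several times; |has_more| == false marks the final batch. The
// class below is an example, not part of the generated sources.
//
//   class MyConsumer : public perfetto::Consumer {
//    public:
//     void OnConnect() override {}
//     void OnDisconnect() override {}
//     void OnTracingDisabled() override {}
//     void OnTraceData(std::vector<perfetto::TracePacket> packets,
//                      bool has_more) override {
//       for (auto& packet : packets)
//         total_bytes_ += packet.size();
//       if (!has_more)
//         done_ = true;  // Last batch received.
//     }
//     void OnDetach(bool) override {}
//     void OnAttach(bool, const perfetto::TraceConfig&) override {}
//     void OnTraceStats(bool, const perfetto::TraceStats&) override {}
//     void OnObservableEvents(const perfetto::ObservableEvents&) override {}
//
//    private:
//     size_t total_bytes_ = 0;
//     bool done_ = false;
//   };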
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/producer.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_PRODUCER_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_PRODUCER_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
namespace perfetto {
|
|
|
|
class SharedMemory;
|
|
|
|
// A Producer is an entity that connects to the write-only port of the Service
|
|
// and exposes the ability to produce performance data on-demand. The lifecycle
|
|
// of a Producer is as follows:
|
|
// 1. The producer connects to the service and advertises its data sources
|
|
// (e.g., the ability to get kernel ftraces, to list process stats).
|
|
// 2. The service acknowledges the connection and sends over the SharedMemory
|
|
// region that will be used to exchange data (together with the signalling
|
|
// API TracingService::ProducerEndpoint::OnPageAcquired()/OnPageReleased()).
|
|
// 3. At some point later on, the Service asks the Producer to turn on some of
|
|
// the previously registered data sources, together with some configuration
|
|
// parameters. This happens via the StartDataSource() callback.
|
|
// 4. In response to that the Producer will spawn an instance of the given data
|
|
// source and inject its data into the shared memory buffer (obtained during
|
|
// OnConnect).
|
|
// This interface is subclassed by:
|
|
// 1. The actual producer code in the clients e.g., the ftrace reader process.
|
|
// 2. The transport layer when interposing RPC between service and producers.
|
|
class PERFETTO_EXPORT Producer {
|
|
public:
|
|
virtual ~Producer();
|
|
|
|
// Called by Service (or more typically by the transport layer, on behalf of
|
|
// the remote Service), once the Producer <> Service connection has been
|
|
// established.
|
|
virtual void OnConnect() = 0;
|
|
|
|
// Called by the Service or by the transport layer if the connection with the
|
|
// service drops, either voluntarily (e.g., by destroying the ProducerEndpoint
|
|
// obtained through Service::ConnectProducer()) or involuntarily (e.g., if the
|
|
// Service process crashes).
|
|
// The Producer is expected to tear down all its data sources if this happens.
|
|
// Once this call returns it is possible to safely destroy the Producer
|
|
// instance.
|
|
virtual void OnDisconnect() = 0;
|
|
|
|
// Called by the Service after OnConnect but before the first DataSource is
|
|
// created. Can be used for any setup required before tracing begins.
|
|
virtual void OnTracingSetup() = 0;
|
|
|
|
// The lifecycle methods below are always called in the following sequence:
|
|
// SetupDataSource -> StartDataSource -> StopDataSource.
|
|
// Or, in the edge case where a trace is aborted immediately:
|
|
// SetupDataSource -> StopDataSource.
|
|
// The Setup+Start call sequence is always guaranteed, regardless of the
|
|
// TraceConfig.deferred_start flags.
|
|
// Called by the Service to configure one of the data sources previously
|
|
// registered through TracingService::ProducerEndpoint::RegisterDataSource().
|
|
// This method is always called before StartDataSource. There is always a
|
|
// SetupDataSource() call before each StartDataSource() call.
|
|
// Args:
|
|
// - DataSourceInstanceID is an identifier chosen by the Service that should
|
|
// be assigned to the newly created data source instance. It is used to
|
|
// match the StopDataSource() request below.
|
|
// - DataSourceConfig is the configuration for the new data source (e.g.,
|
|
// tells which trace categories to enable).
|
|
virtual void SetupDataSource(DataSourceInstanceID,
|
|
const DataSourceConfig&) = 0;
|
|
|
|
// Called by the Service to turn on one of the data sources previously
|
|
// registered through TracingService::ProducerEndpoint::RegisterDataSource()
|
|
// and initialized through SetupDataSource().
|
|
// Both arguments are guaranteed to be identical to the ones passed to the
|
|
// prior SetupDataSource() call.
|
|
virtual void StartDataSource(DataSourceInstanceID,
|
|
const DataSourceConfig&) = 0;
|
|
|
|
// Called by the Service to shut down an existing data source instance.
|
|
virtual void StopDataSource(DataSourceInstanceID) = 0;
|
|
|
|
// Called by the service to request the Producer to commit the data of the
|
|
// given data sources and return their chunks into the shared memory buffer.
|
|
// The Producer is expected to invoke NotifyFlushComplete(FlushRequestID) on
|
|
// the Service after the data has been committed. The producer can either
|
|
// reply to the flush requests in order, or just reply to the latest one.
|
|
// Upon seeing a NotifyFlushComplete(N), the service will assume that all
|
|
// flushes < N have also been committed.
|
|
virtual void Flush(FlushRequestID,
|
|
const DataSourceInstanceID* data_source_ids,
|
|
size_t num_data_sources) = 0;
|
|
|
|
// Called by the service to instruct the given data sources to stop referring
|
|
// to any trace contents emitted so far. The intent is that after processing
|
|
// this call, the rest of the trace should be parsable even if all of the
|
|
// packets emitted so far have been lost (for example due to ring buffer
|
|
// overwrites).
|
|
//
|
|
// Called only for Producers with active data sources that have opted in by
|
|
// setting |handles_incremental_state_clear| in their DataSourceDescriptor.
|
|
//
|
|
// The way this call is handled is up to the individual Producer
|
|
// implementation. Some might wish to emit invalidation markers in the trace
|
|
// (see TracePacket.incremental_state_cleared for an existing field), and
|
|
// handle them when parsing the trace.
|
|
virtual void ClearIncrementalState(
|
|
const DataSourceInstanceID* data_source_ids,
|
|
size_t num_data_sources) = 0;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_PRODUCER_H_
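// Illustrative sketch (comments only): the skeleton of a Producer implementing
// the lifecycle described above (SetupDataSource -> StartDataSource ->
// StopDataSource) and acknowledging flushes. |endpoint_| is assumed to be the
// TracingService::ProducerEndpoint* obtained when connecting the producer.
//
//   class MyProducer : public perfetto::Producer {
//    public:
//     void OnConnect() override { /* Register data sources here. */ }
//     void OnDisconnect() override { /* Tear down data sources. */ }
//     void OnTracingSetup() override {}
//     void SetupDataSource(perfetto::DataSourceInstanceID,
//                          const perfetto::DataSourceConfig&) override {}
//     void StartDataSource(perfetto::DataSourceInstanceID,
//                          const perfetto::DataSourceConfig&) override {}
//     void StopDataSource(perfetto::DataSourceInstanceID) override {}
//     void Flush(perfetto::FlushRequestID flush_id,
//                const perfetto::DataSourceInstanceID*,
//                size_t) override {
//       // Commit pending data, then acknowledge the flush request.
//       endpoint_->NotifyFlushComplete(flush_id);
//     }
//     void ClearIncrementalState(const perfetto::DataSourceInstanceID*,
//                                size_t) override {}
//
//    private:
//     perfetto::TracingService::ProducerEndpoint* endpoint_ = nullptr;
//   };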
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
// This translation unit contains the definitions for the destructor of pure
|
|
// virtual interfaces for the current build target. The alternative would be
|
|
// introducing a one-liner .cc file for each pure virtual interface, which is
|
|
// overkill. This is for compliance with -Wweak-vtables.
|
|
|
|
namespace perfetto {
|
|
|
|
Consumer::~Consumer() = default;
|
|
Producer::~Producer() = default;
|
|
TracingService::~TracingService() = default;
|
|
ConsumerEndpoint::~ConsumerEndpoint() = default;
|
|
ProducerEndpoint::~ProducerEndpoint() = default;
|
|
SharedMemory::~SharedMemory() = default;
|
|
SharedMemory::Factory::~Factory() = default;
|
|
SharedMemoryArbiter::~SharedMemoryArbiter() = default;
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/data_source.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/data_source.h"
|
|
|
|
namespace perfetto {
|
|
|
|
DataSourceBase::StopArgs::~StopArgs() = default;
|
|
DataSourceBase::~DataSourceBase() = default;
|
|
void DataSourceBase::OnSetup(const SetupArgs&) {}
|
|
void DataSourceBase::OnStart(const StartArgs&) {}
|
|
void DataSourceBase::OnStop(const StopArgs&) {}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/debug_annotation.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/debug_annotation.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
DebugAnnotation::~DebugAnnotation() = default;
|
|
|
|
namespace internal {
|
|
|
|
void WriteDebugAnnotation(protos::pbzero::DebugAnnotation* annotation,
|
|
const char* value) {
|
|
annotation->set_string_value(value);
|
|
}
|
|
|
|
void WriteDebugAnnotation(protos::pbzero::DebugAnnotation* annotation,
|
|
const std::string& value) {
|
|
annotation->set_string_value(value);
|
|
}
|
|
|
|
void WriteDebugAnnotation(protos::pbzero::DebugAnnotation* annotation,
|
|
const void* value) {
|
|
annotation->set_pointer_value(reinterpret_cast<uint64_t>(value));
|
|
}
|
|
|
|
void WriteDebugAnnotation(protos::pbzero::DebugAnnotation* annotation,
|
|
const DebugAnnotation& custom_annotation) {
|
|
custom_annotation.Add(annotation);
|
|
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
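// Illustrative sketch (comments only): a custom annotation, as consumed by the
// WriteDebugAnnotation() overload above that takes a DebugAnnotation&. The
// class name and payload are assumptions for the example.
//
//   class PointAnnotation : public perfetto::DebugAnnotation {
//    public:
//     PointAnnotation(int x, int y) : x_(x), y_(y) {}
//     void Add(perfetto::protos::pbzero::DebugAnnotation* annotation)
//         const override {
//       annotation->set_string_value("(" + std::to_string(x_) + ", " +
//                                    std::to_string(y_) + ")");
//     }
//
//    private:
//     int x_;
//     int y_;
//   };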
|
|
// gen_amalgamated begin source: src/tracing/event_context.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/event_context.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
EventContext::EventContext(
|
|
EventContext::TracePacketHandle trace_packet,
|
|
internal::TrackEventIncrementalState* incremental_state)
|
|
: trace_packet_(std::move(trace_packet)),
|
|
event_(trace_packet_->set_track_event()),
|
|
incremental_state_(incremental_state) {}
|
|
|
|
EventContext::~EventContext() {
|
|
if (!trace_packet_)
|
|
return;
|
|
|
|
// When the track event is finalized (i.e., the context is destroyed), we
|
|
// should flush any newly seen interned data to the trace. The data has
|
|
// earlier been written to a heap allocated protobuf message
|
|
// (|serialized_interned_data|). Here we just need to flush it to the main
|
|
// trace.
|
|
auto& serialized_interned_data = incremental_state_->serialized_interned_data;
|
|
if (PERFETTO_LIKELY(serialized_interned_data.empty()))
|
|
return;
|
|
|
|
auto ranges = serialized_interned_data.GetRanges();
|
|
trace_packet_->AppendScatteredBytes(
|
|
perfetto::protos::pbzero::TracePacket::kInternedDataFieldNumber,
|
|
&ranges[0], ranges.size());
|
|
|
|
// Reset the message but keep one buffer allocated for future use.
|
|
serialized_interned_data.Reset();
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/tracing_muxer_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/internal/tracing_muxer_impl.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_INTERNAL_TRACING_MUXER_IMPL_H_
|
|
#define SRC_TRACING_INTERNAL_TRACING_MUXER_IMPL_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
#include <array>
|
|
#include <atomic>
|
|
#include <bitset>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_muxer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
|
|
namespace perfetto {
|
|
|
|
class ConsumerEndpoint;
|
|
class DataSourceBase;
|
|
class ProducerEndpoint;
|
|
class TraceWriterBase;
|
|
class TracingBackend;
|
|
class TracingSession;
|
|
struct TracingInitArgs;
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
}
|
|
|
|
namespace internal {
|
|
|
|
struct DataSourceStaticState;
|
|
|
|
// This class acts as a bridge between the public API and the TracingBackend(s).
|
|
// It exposes a simplified view of the world to the API methods handling all the
|
|
// bookkeeping to map data source instances and trace writers to the various
|
|
// backends. It deals with N data sources, M backends (1 backend == 1 tracing
|
|
// service == 1 producer connection) and T concurrent tracing sessions.
|
|
//
|
|
// Handling data source registration and start/stop flows [producer side]:
|
|
// ----------------------------------------------------------------------
|
|
// 1. The API client subclasses perfetto::DataSource and calls
|
|
// DataSource::Register<MyDataSource>(). In turn this calls into the
|
|
// TracingMuxer.
|
|
// 2. The tracing muxer iterates through all the backends (1 backend == 1
|
|
// service == 1 producer connection) and registers the data source on each
|
|
// backend.
|
|
// 3. When any (services behind a) backend starts tracing and requests to start
|
|
// that specific data source, the TracingMuxerImpl constructs a new instance
|
|
// of MyDataSource and calls the OnStart() method.
|
|
//
|
|
// Controlling trace and retrieving trace data [consumer side]:
|
|
// ------------------------------------------------------------
|
|
// 1. The API client calls Tracing::NewTrace(), returns a RAII TracingSession
|
|
// object.
|
|
// 2. NewTrace() calls into internal::TracingMuxer(Impl). TracingMuxer
|
|
// subclasses the TracingSession object (TracingSessionImpl) and returns it.
|
|
// 3. The tracing muxer identifies the backend (according to the args passed to
|
|
// NewTrace), creates a new Consumer and connects to it.
|
|
// 4. When the API client calls Start()/Stop()/ReadTrace() methods, the
|
|
// TracingMuxer forwards them to the consumer associated to the
|
|
// TracingSession. Likewise for callbacks coming from the consumer-side of
|
|
// the service.
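//
// For illustration, a usage sketch from the API client's perspective. This is
// not part of this file; MyDataSource and the trace config are hypothetical,
// while the API entry points are the ones described above:
//
//   // Producer side: define and register a data source.
//   class MyDataSource : public perfetto::DataSource<MyDataSource> {
//    public:
//     void OnSetup(const SetupArgs&) override {}
//     void OnStart(const StartArgs&) override {}
//     void OnStop(const StopArgs&) override {}
//   };
//   perfetto::DataSourceDescriptor dsd;
//   dsd.set_name("com.example.my_data_source");
//   MyDataSource::Register(dsd);
//
//   // Consumer side: control a tracing session and read it back.
//   std::unique_ptr<perfetto::TracingSession> session =
//       perfetto::Tracing::NewTrace();
//   session->Setup(trace_config);  // |trace_config| built elsewhere.
//   session->StartBlocking();
//   ...
//   session->StopBlocking();
//   session->ReadTrace(
//       [](perfetto::TracingSession::ReadTraceCallbackArgs args) {
//         // args.data/args.size hold a chunk of the serialized trace;
//         // args.has_more tells whether more chunks will follow.
//       });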
class TracingMuxerImpl : public TracingMuxer {
|
|
public:
|
|
// This is different from TracingSessionID because it's global across all
|
|
// backends. TracingSessionID is global only within the scope of one service.
|
|
using TracingSessionGlobalID = uint64_t;
|
|
|
|
static void InitializeInstance(const TracingInitArgs&);
|
|
|
|
// TracingMuxer implementation.
|
|
bool RegisterDataSource(const DataSourceDescriptor&,
|
|
DataSourceFactory,
|
|
DataSourceStaticState*) override;
|
|
std::unique_ptr<TraceWriterBase> CreateTraceWriter(
|
|
DataSourceState*,
|
|
BufferExhaustedPolicy buffer_exhausted_policy) override;
|
|
void DestroyStoppedTraceWritersForCurrentThread() override;
|
|
|
|
std::unique_ptr<TracingSession> CreateTracingSession(BackendType);
|
|
|
|
// Producer-side bookkeeping methods.
|
|
void UpdateDataSourcesOnAllBackends();
|
|
void SetupDataSource(TracingBackendId,
|
|
DataSourceInstanceID,
|
|
const DataSourceConfig&);
|
|
void StartDataSource(TracingBackendId, DataSourceInstanceID);
|
|
void StopDataSource_AsyncBegin(TracingBackendId, DataSourceInstanceID);
|
|
void StopDataSource_AsyncEnd(TracingBackendId, DataSourceInstanceID);
|
|
|
|
// Consumer-side bookkeeping methods.
|
|
void SetupTracingSession(TracingSessionGlobalID,
|
|
const std::shared_ptr<TraceConfig>&,
|
|
base::ScopedFile trace_fd = base::ScopedFile());
|
|
void StartTracingSession(TracingSessionGlobalID);
|
|
void StopTracingSession(TracingSessionGlobalID);
|
|
void DestroyTracingSession(TracingSessionGlobalID);
|
|
void ReadTracingSessionData(
|
|
TracingSessionGlobalID,
|
|
std::function<void(TracingSession::ReadTraceCallbackArgs)>);
|
|
void GetTraceStats(TracingSessionGlobalID,
|
|
TracingSession::GetTraceStatsCallback);
|
|
|
|
private:
|
|
// For each TracingBackend we create and register one ProducerImpl instance.
|
|
// This talks to the producer-side of the service, gets start/stop requests
|
|
// from it and routes them to the registered data sources.
|
|
// One ProducerImpl == one backend == one tracing service.
|
|
// This class is needed to disambiguate callbacks coming from different
|
|
// services. TracingMuxerImpl can't directly implement the Producer interface
// because the Producer virtual methods don't identify which service they come
// from.
|
|
class ProducerImpl : public Producer {
|
|
public:
|
|
ProducerImpl(TracingMuxerImpl*,
|
|
TracingBackendId,
|
|
uint32_t shmem_batch_commits_duration_ms);
|
|
~ProducerImpl() override;
|
|
|
|
void Initialize(std::unique_ptr<ProducerEndpoint> endpoint);
|
|
void RegisterDataSource(const DataSourceDescriptor&,
|
|
DataSourceFactory,
|
|
DataSourceStaticState*);
|
|
|
|
// perfetto::Producer implementation.
|
|
void OnConnect() override;
|
|
void OnDisconnect() override;
|
|
void OnTracingSetup() override;
|
|
void SetupDataSource(DataSourceInstanceID,
|
|
const DataSourceConfig&) override;
|
|
void StartDataSource(DataSourceInstanceID,
|
|
const DataSourceConfig&) override;
|
|
void StopDataSource(DataSourceInstanceID) override;
|
|
void Flush(FlushRequestID, const DataSourceInstanceID*, size_t) override;
|
|
void ClearIncrementalState(const DataSourceInstanceID*, size_t) override;
|
|
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
TracingMuxerImpl* const muxer_;
|
|
TracingBackendId const backend_id_;
|
|
bool connected_ = false;
|
|
|
|
const uint32_t shmem_batch_commits_duration_ms_ = 0;
|
|
|
|
// Set of data sources that have been actually registered on this producer.
|
|
// This can be a subset of the global |data_sources_|, because data sources
|
|
// can register before the producer is fully connected.
|
|
std::bitset<kMaxDataSources> registered_data_sources_{};
|
|
|
|
std::unique_ptr<ProducerEndpoint> service_; // Keep last.
|
|
};
|
|
|
|
// For each TracingSession created by the API client (Tracing::NewTrace()) we
|
|
// create and register one ConsumerImpl instance.
|
|
// This talks to the consumer-side of the service, gets end-of-trace and
|
|
// on-trace-data callbacks and routes them to the API client callbacks.
|
|
// This class is needed to disambiguate callbacks coming from different
|
|
// tracing sessions.
|
|
class ConsumerImpl : public Consumer {
|
|
public:
|
|
ConsumerImpl(TracingMuxerImpl*, TracingBackendId, TracingSessionGlobalID);
|
|
~ConsumerImpl() override;
|
|
|
|
void Initialize(std::unique_ptr<ConsumerEndpoint> endpoint);
|
|
|
|
// perfetto::Consumer implementation.
|
|
void OnConnect() override;
|
|
void OnDisconnect() override;
|
|
void OnTracingDisabled() override;
|
|
void OnTraceData(std::vector<TracePacket>, bool has_more) override;
|
|
void OnDetach(bool success) override;
|
|
void OnAttach(bool success, const TraceConfig&) override;
|
|
void OnTraceStats(bool success, const TraceStats&) override;
|
|
void OnObservableEvents(const ObservableEvents&) override;
|
|
|
|
void NotifyStartComplete();
|
|
void NotifyStopComplete();
|
|
|
|
// Will eventually inform the |muxer_| when it is safe to remove |this|.
|
|
void Disconnect();
|
|
|
|
TracingMuxerImpl* const muxer_;
|
|
TracingBackendId const backend_id_;
|
|
TracingSessionGlobalID const session_id_;
|
|
bool connected_ = false;
|
|
|
|
// This is to handle the case where the Setup call from the API client
|
|
// arrives before the consumer has connected. In this case we keep around
|
|
// the config and check if we have it after connection.
|
|
bool start_pending_ = false;
|
|
|
|
// Similarly if the session is stopped before the consumer was connected, we
|
|
// need to wait until the session has started before stopping it.
|
|
bool stop_pending_ = false;
|
|
|
|
// Similarly we need to buffer a call to get trace statistics if the
|
|
// consumer wasn't connected yet.
|
|
bool get_trace_stats_pending_ = false;
|
|
|
|
// Whether this session was already stopped. This will happen in response to
|
|
// Stop{,Blocking}, but also if the service stops the session for us
|
|
// automatically (e.g., when there are no data sources).
|
|
bool stopped_ = false;
|
|
|
|
// shared_ptr because it's posted across threads. This is to avoid copying
|
|
// it more than once.
|
|
std::shared_ptr<TraceConfig> trace_config_;
|
|
base::ScopedFile trace_fd_;
|
|
|
|
// If the API client passes a callback to start, we should invoke this when
|
|
// NotifyStartComplete() is invoked.
|
|
std::function<void()> start_complete_callback_;
|
|
|
|
// An internal callback used to implement StartBlocking().
|
|
std::function<void()> blocking_start_complete_callback_;
|
|
|
|
// If the API client passes a callback to stop, we should invoke this when
|
|
// OnTracingDisabled() is invoked.
|
|
std::function<void()> stop_complete_callback_;
|
|
|
|
// An internal callback used to implement StopBlocking().
|
|
std::function<void()> blocking_stop_complete_callback_;
|
|
|
|
// Callback passed to ReadTrace().
|
|
std::function<void(TracingSession::ReadTraceCallbackArgs)>
|
|
read_trace_callback_;
|
|
|
|
// Callback passed to GetTraceStats().
|
|
TracingSession::GetTraceStatsCallback get_trace_stats_callback_;
|
|
|
|
// The states of all data sources in this tracing session. |true| means the
|
|
// data source has started tracing.
|
|
using DataSourceHandle = std::pair<std::string, std::string>;
|
|
std::map<DataSourceHandle, bool> data_source_states_;
|
|
|
|
std::unique_ptr<ConsumerEndpoint> service_; // Keep before last.
|
|
PERFETTO_THREAD_CHECKER(thread_checker_) // Keep last.
|
|
};
|
|
|
|
// This object is returned to API clients when they call
|
|
// Tracing::CreateTracingSession().
|
|
class TracingSessionImpl : public TracingSession {
|
|
public:
|
|
TracingSessionImpl(TracingMuxerImpl*, TracingSessionGlobalID);
|
|
~TracingSessionImpl() override;
|
|
void Setup(const TraceConfig&, int fd) override;
|
|
void Start() override;
|
|
void StartBlocking() override;
|
|
void SetOnStartCallback(std::function<void()>) override;
|
|
void Stop() override;
|
|
void StopBlocking() override;
|
|
void ReadTrace(ReadTraceCallback) override;
|
|
void SetOnStopCallback(std::function<void()>) override;
|
|
void GetTraceStats(GetTraceStatsCallback) override;
|
|
|
|
private:
|
|
TracingMuxerImpl* const muxer_;
|
|
TracingSessionGlobalID const session_id_;
|
|
};
|
|
|
|
struct RegisteredDataSource {
|
|
DataSourceDescriptor descriptor;
|
|
DataSourceFactory factory{};
|
|
DataSourceStaticState* static_state = nullptr;
|
|
};
|
|
|
|
struct RegisteredBackend {
|
|
// Backends are supposed to have static lifetime.
|
|
TracingBackend* backend = nullptr;
|
|
TracingBackendId id = 0;
|
|
BackendType type{};
|
|
|
|
std::unique_ptr<ProducerImpl> producer;
|
|
|
|
// The calling code can request more than one concurrently active tracing
|
|
// session for the same backend. We need to create one consumer per session.
|
|
std::vector<std::unique_ptr<ConsumerImpl>> consumers;
|
|
};
|
|
|
|
explicit TracingMuxerImpl(const TracingInitArgs&);
|
|
void Initialize(const TracingInitArgs& args);
|
|
ConsumerImpl* FindConsumer(TracingSessionGlobalID session_id);
|
|
void OnConsumerDisconnected(ConsumerImpl* consumer);
|
|
|
|
struct FindDataSourceRes {
|
|
FindDataSourceRes() = default;
|
|
FindDataSourceRes(DataSourceStaticState* a, DataSourceState* b, uint32_t c)
|
|
: static_state(a), internal_state(b), instance_idx(c) {}
|
|
explicit operator bool() const { return !!internal_state; }
|
|
|
|
DataSourceStaticState* static_state = nullptr;
|
|
DataSourceState* internal_state = nullptr;
|
|
uint32_t instance_idx = 0;
|
|
};
|
|
FindDataSourceRes FindDataSource(TracingBackendId, DataSourceInstanceID);
|
|
|
|
std::unique_ptr<base::TaskRunner> task_runner_;
|
|
std::vector<RegisteredDataSource> data_sources_;
|
|
std::vector<RegisteredBackend> backends_;
|
|
|
|
std::atomic<TracingSessionGlobalID> next_tracing_session_id_{};
|
|
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
};
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_INTERNAL_TRACING_MUXER_IMPL_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/trace_stats.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_STATS_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_STATS_H_
|
|
|
|
// Creates the aliases in the ::perfetto namespace, doing things like:
|
|
// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
|
|
// See comments in forward_decls.h for the historical reasons for this
|
|
// indirection layer.
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_STATS_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/internal/tracing_muxer_impl.h"
|
|
|
|
#include <algorithm>
|
|
#include <atomic>
|
|
#include <mutex>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/waitable_event.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/data_source.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/data_source_internal.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
namespace {
|
|
|
|
class StopArgsImpl : public DataSourceBase::StopArgs {
|
|
public:
|
|
std::function<void()> HandleStopAsynchronously() const override {
|
|
auto closure = std::move(async_stop_closure);
|
|
async_stop_closure = std::function<void()>();
|
|
return closure;
|
|
}
|
|
|
|
mutable std::function<void()> async_stop_closure;
|
|
};
|
|
|
|
uint64_t ComputeConfigHash(const DataSourceConfig& config) {
|
|
base::Hash hasher;
|
|
std::string config_bytes = config.SerializeAsString();
|
|
hasher.Update(config_bytes.data(), config_bytes.size());
|
|
return hasher.digest();
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// ----- Begin of TracingMuxerImpl::ProducerImpl
|
|
TracingMuxerImpl::ProducerImpl::ProducerImpl(
|
|
TracingMuxerImpl* muxer,
|
|
TracingBackendId backend_id,
|
|
uint32_t shmem_batch_commits_duration_ms)
|
|
: muxer_(muxer),
|
|
backend_id_(backend_id),
|
|
shmem_batch_commits_duration_ms_(shmem_batch_commits_duration_ms) {}
|
|
TracingMuxerImpl::ProducerImpl::~ProducerImpl() = default;
|
|
|
|
void TracingMuxerImpl::ProducerImpl::Initialize(
|
|
std::unique_ptr<ProducerEndpoint> endpoint) {
|
|
service_ = std::move(endpoint);
|
|
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::OnConnect() {
|
|
PERFETTO_DLOG("Producer connected");
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DCHECK(!connected_);
|
|
connected_ = true;
|
|
muxer_->UpdateDataSourcesOnAllBackends();
|
|
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::OnDisconnect() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
connected_ = false;
|
|
// TODO: handle this more gracefully. Right now we only handle the case of
// retrying when the service cannot be reached in the first place (this is
// handled transparently by ProducerIPCClientImpl). If the connection is
// dropped afterwards (e.g., because traced crashes), we don't recover: doing
// so would require reconnecting and re-registering all the data sources.
|
|
PERFETTO_ELOG(
|
|
"The connection to the tracing service dropped. Tracing will no longer "
|
|
"work until this process is restarted");
|
|
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::OnTracingSetup() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
service_->MaybeSharedMemoryArbiter()->SetBatchCommitsDuration(
|
|
shmem_batch_commits_duration_ms_);
|
|
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::SetupDataSource(
|
|
DataSourceInstanceID id,
|
|
const DataSourceConfig& cfg) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
muxer_->SetupDataSource(backend_id_, id, cfg);
|
|
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::StartDataSource(DataSourceInstanceID id,
|
|
const DataSourceConfig&) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
muxer_->StartDataSource(backend_id_, id);
|
|
service_->NotifyDataSourceStarted(id);
|
|
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::StopDataSource(DataSourceInstanceID id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
muxer_->StopDataSource_AsyncBegin(backend_id_, id);
|
|
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::Flush(FlushRequestID flush_id,
|
|
const DataSourceInstanceID*,
|
|
size_t) {
|
|
// Flush is not plumbed for now, we just ack straight away.
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
service_->NotifyFlushComplete(flush_id);
|
|
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::ClearIncrementalState(
|
|
const DataSourceInstanceID*,
|
|
size_t) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
// TODO(skyostil): Mark each affected data source's incremental state as
|
|
// needing to be cleared.
|
|
}
|
|
// ----- End of TracingMuxerImpl::ProducerImpl methods.
|
|
|
|
// ----- Begin of TracingMuxerImpl::ConsumerImpl
|
|
TracingMuxerImpl::ConsumerImpl::ConsumerImpl(TracingMuxerImpl* muxer,
|
|
TracingBackendId backend_id,
|
|
TracingSessionGlobalID session_id)
|
|
: muxer_(muxer), backend_id_(backend_id), session_id_(session_id) {}
|
|
|
|
TracingMuxerImpl::ConsumerImpl::~ConsumerImpl() = default;
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::Initialize(
|
|
std::unique_ptr<ConsumerEndpoint> endpoint) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
service_ = std::move(endpoint);
|
|
// Observe data source instance events so we get notified when tracing starts.
|
|
service_->ObserveEvents(ObservableEvents::TYPE_DATA_SOURCES_INSTANCES);
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::OnConnect() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DCHECK(!connected_);
|
|
connected_ = true;
|
|
|
|
// If the API client configured and started tracing before we connected,
|
|
// tell the backend about it now.
|
|
if (trace_config_) {
|
|
muxer_->SetupTracingSession(session_id_, trace_config_);
|
|
if (start_pending_)
|
|
muxer_->StartTracingSession(session_id_);
|
|
if (get_trace_stats_pending_)
|
|
muxer_->GetTraceStats(session_id_, std::move(get_trace_stats_callback_));
|
|
if (stop_pending_)
|
|
muxer_->StopTracingSession(session_id_);
|
|
}
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::OnDisconnect() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
// It shouldn't be necessary to call StopTracingSession. If we get this call
// it means that the service shut down before us, so there is no point in
// asking it to stop the session. We should just remember to clean up the
// consumer vector.
|
|
connected_ = false;
|
|
|
|
// TODO notify the client somehow.
|
|
|
|
// Notify the muxer that it is safe to destroy |this|. This is needed because
|
|
// the ConsumerEndpoint stored in |service_| requires that |this| be safe to
|
|
// access until OnDisconnect() is called.
|
|
muxer_->OnConsumerDisconnected(this);
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::Disconnect() {
|
|
// This is weird and deserves a comment.
|
|
//
|
|
// When we call the ConnectConsumer method on the service, it returns a
// ConsumerEndpoint which we store in |service_|. However, that
// ConsumerEndpoint holds a pointer back to the ConsumerImpl pointed to by
// |this|. Part of the API contract of TracingService::ConnectConsumer is that
// the ConsumerImpl pointer has to remain valid until
// ConsumerImpl::OnDisconnect is called. Therefore we reset the
// ConsumerEndpoint |service_| here. Eventually this will call
// ConsumerImpl::OnDisconnect and we will inform the muxer that it is safe to
// destroy |this|.
|
|
service_.reset();
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::OnTracingDisabled() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DCHECK(!stopped_);
|
|
stopped_ = true;
|
|
// If we're still waiting for the start event, fire it now. This may happen if
|
|
// there are no active data sources in the session.
|
|
NotifyStartComplete();
|
|
NotifyStopComplete();
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::NotifyStartComplete() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (start_complete_callback_) {
|
|
muxer_->task_runner_->PostTask(std::move(start_complete_callback_));
|
|
start_complete_callback_ = nullptr;
|
|
}
|
|
if (blocking_start_complete_callback_) {
|
|
muxer_->task_runner_->PostTask(
|
|
std::move(blocking_start_complete_callback_));
|
|
blocking_start_complete_callback_ = nullptr;
|
|
}
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::NotifyStopComplete() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (stop_complete_callback_) {
|
|
muxer_->task_runner_->PostTask(std::move(stop_complete_callback_));
|
|
stop_complete_callback_ = nullptr;
|
|
}
|
|
if (blocking_stop_complete_callback_) {
|
|
muxer_->task_runner_->PostTask(std::move(blocking_stop_complete_callback_));
|
|
blocking_stop_complete_callback_ = nullptr;
|
|
}
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::OnTraceData(
|
|
std::vector<TracePacket> packets,
|
|
bool has_more) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!read_trace_callback_)
|
|
return;
|
|
|
|
size_t capacity = 0;
|
|
for (const auto& packet : packets) {
|
|
// 16 is an over-estimation of the proto preamble size
|
|
capacity += packet.size() + 16;
|
|
}
|
|
|
|
// The shared_ptr is to avoid making a copy of the buffer when PostTask-ing.
|
|
std::shared_ptr<std::vector<char>> buf(new std::vector<char>());
|
|
buf->reserve(capacity);
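// Each packet is appended as its proto preamble (field id + payload size)
// followed by its payload slices, i.e. the packets end up back to back in
// |buf| in their length-delimited wire format.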
for (auto& packet : packets) {
|
|
char* start;
|
|
size_t size;
|
|
std::tie(start, size) = packet.GetProtoPreamble();
|
|
buf->insert(buf->end(), start, start + size);
|
|
for (auto& slice : packet.slices()) {
|
|
const auto* slice_data = reinterpret_cast<const char*>(slice.start);
|
|
buf->insert(buf->end(), slice_data, slice_data + slice.size);
|
|
}
|
|
}
|
|
|
|
auto callback = read_trace_callback_;
|
|
muxer_->task_runner_->PostTask([callback, buf, has_more] {
|
|
TracingSession::ReadTraceCallbackArgs callback_arg{};
|
|
callback_arg.data = buf->size() ? &(*buf)[0] : nullptr;
|
|
callback_arg.size = buf->size();
|
|
callback_arg.has_more = has_more;
|
|
callback(callback_arg);
|
|
});
|
|
|
|
if (!has_more)
|
|
read_trace_callback_ = nullptr;
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::OnObservableEvents(
|
|
const ObservableEvents& events) {
|
|
if (events.instance_state_changes_size()) {
|
|
for (const auto& state_change : events.instance_state_changes()) {
|
|
DataSourceHandle handle{state_change.producer_name(),
|
|
state_change.data_source_name()};
|
|
data_source_states_[handle] =
|
|
state_change.state() ==
|
|
ObservableEvents::DATA_SOURCE_INSTANCE_STATE_STARTED;
|
|
}
|
|
// Data sources are first reported as stopped before they start, so once all
// the data sources we know about have started we can consider tracing to have
// begun.
|
|
if (start_complete_callback_ || blocking_start_complete_callback_) {
|
|
bool all_data_sources_started = std::all_of(
|
|
data_source_states_.cbegin(), data_source_states_.cend(),
|
|
[](std::pair<DataSourceHandle, bool> state) { return state.second; });
|
|
if (all_data_sources_started)
|
|
NotifyStartComplete();
|
|
}
|
|
}
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::OnTraceStats(
|
|
bool success,
|
|
const TraceStats& trace_stats) {
|
|
if (!get_trace_stats_callback_)
|
|
return;
|
|
TracingSession::GetTraceStatsCallbackArgs callback_arg{};
|
|
callback_arg.success = success;
|
|
callback_arg.trace_stats_data = trace_stats.SerializeAsArray();
|
|
muxer_->task_runner_->PostTask(
|
|
std::bind(std::move(get_trace_stats_callback_), std::move(callback_arg)));
|
|
get_trace_stats_callback_ = nullptr;
|
|
}
|
|
|
|
// The callbacks below are not used.
|
|
void TracingMuxerImpl::ConsumerImpl::OnDetach(bool) {}
|
|
void TracingMuxerImpl::ConsumerImpl::OnAttach(bool, const TraceConfig&) {}
|
|
// ----- End of TracingMuxerImpl::ConsumerImpl
|
|
|
|
// ----- Begin of TracingMuxerImpl::TracingSessionImpl
|
|
|
|
// TracingSessionImpl is the RAII object returned to API clients when they
|
|
// invoke Tracing::CreateTracingSession. They use it for starting/stopping
|
|
// tracing.
|
|
|
|
TracingMuxerImpl::TracingSessionImpl::TracingSessionImpl(
|
|
TracingMuxerImpl* muxer,
|
|
TracingSessionGlobalID session_id)
|
|
: muxer_(muxer), session_id_(session_id) {}
|
|
|
|
// Can be destroyed from any thread.
|
|
TracingMuxerImpl::TracingSessionImpl::~TracingSessionImpl() {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask(
|
|
[muxer, session_id] { muxer->DestroyTracingSession(session_id); });
|
|
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::Setup(const TraceConfig& cfg,
|
|
int fd) {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
std::shared_ptr<TraceConfig> trace_config(new TraceConfig(cfg));
|
|
if (fd >= 0) {
|
|
trace_config->set_write_into_file(true);
|
|
fd = dup(fd);
|
|
}
|
|
muxer->task_runner_->PostTask([muxer, session_id, trace_config, fd] {
|
|
muxer->SetupTracingSession(session_id, trace_config, base::ScopedFile(fd));
|
|
});
|
|
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::Start() {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask(
|
|
[muxer, session_id] { muxer->StartTracingSession(session_id); });
|
|
}
|
|
|
|
// Can be called from any thread except the service thread.
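// Blocking on the muxer's task runner thread would deadlock: the task posted
// below could never run and |tracing_started| would never be notified.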
void TracingMuxerImpl::TracingSessionImpl::StartBlocking() {
|
|
PERFETTO_DCHECK(!muxer_->task_runner_->RunsTasksOnCurrentThread());
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
base::WaitableEvent tracing_started;
|
|
muxer->task_runner_->PostTask([muxer, session_id, &tracing_started] {
|
|
auto* consumer = muxer->FindConsumer(session_id);
|
|
PERFETTO_DCHECK(!consumer->blocking_start_complete_callback_);
|
|
consumer->blocking_start_complete_callback_ = [&] {
|
|
tracing_started.Notify();
|
|
};
|
|
muxer->StartTracingSession(session_id);
|
|
});
|
|
tracing_started.Wait();
|
|
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::Stop() {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask(
|
|
[muxer, session_id] { muxer->StopTracingSession(session_id); });
|
|
}
|
|
|
|
// Can be called from any thread except the service thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::StopBlocking() {
|
|
PERFETTO_DCHECK(!muxer_->task_runner_->RunsTasksOnCurrentThread());
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
base::WaitableEvent tracing_stopped;
|
|
muxer->task_runner_->PostTask([muxer, session_id, &tracing_stopped] {
|
|
auto* consumer = muxer->FindConsumer(session_id);
|
|
PERFETTO_DCHECK(!consumer->blocking_stop_complete_callback_);
|
|
consumer->blocking_stop_complete_callback_ = [&] {
|
|
tracing_stopped.Notify();
|
|
};
|
|
muxer->StopTracingSession(session_id);
|
|
});
|
|
tracing_stopped.Wait();
|
|
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::ReadTrace(ReadTraceCallback cb) {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask([muxer, session_id, cb] {
|
|
muxer->ReadTracingSessionData(session_id, std::move(cb));
|
|
});
|
|
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::SetOnStartCallback(
|
|
std::function<void()> cb) {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask([muxer, session_id, cb] {
|
|
auto* consumer = muxer->FindConsumer(session_id);
|
|
consumer->start_complete_callback_ = cb;
|
|
});
|
|
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::SetOnStopCallback(
|
|
std::function<void()> cb) {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask([muxer, session_id, cb] {
|
|
auto* consumer = muxer->FindConsumer(session_id);
|
|
consumer->stop_complete_callback_ = cb;
|
|
});
|
|
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::GetTraceStats(
|
|
GetTraceStatsCallback cb) {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask([muxer, session_id, cb] {
|
|
muxer->GetTraceStats(session_id, std::move(cb));
|
|
});
|
|
}
|
|
|
|
// ----- End of TracingMuxerImpl::TracingSessionImpl
|
|
|
|
// static
|
|
TracingMuxer* TracingMuxer::instance_ = nullptr;
|
|
|
|
// This is called by perfetto::Tracing::Initialize().
|
|
// Can be called on any thread. Typically, but not necessarily, that will be
|
|
// the embedder's main thread.
|
|
TracingMuxerImpl::TracingMuxerImpl(const TracingInitArgs& args)
|
|
: TracingMuxer(args.platform ? args.platform
|
|
: Platform::GetDefaultPlatform()) {
|
|
PERFETTO_DETACH_FROM_THREAD(thread_checker_);
|
|
|
|
// Create the thread where muxer, producers and service will live.
|
|
task_runner_ = platform_->CreateTaskRunner({});
|
|
|
|
// Run the initializer on that thread.
|
|
task_runner_->PostTask([this, args] { Initialize(args); });
|
|
}
|
|
|
|
void TracingMuxerImpl::Initialize(const TracingInitArgs& args) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_); // Rebind the thread checker.
|
|
|
|
auto add_backend = [this, &args](TracingBackend* backend, BackendType type) {
|
|
if (!backend) {
|
|
// We skip the log in release builds because the *_backend_fake.cc code
|
|
// already has an ELOG before returning a nullptr.
|
|
PERFETTO_DLOG("Backend creation failed, type %d", static_cast<int>(type));
|
|
return;
|
|
}
|
|
TracingBackendId backend_id = backends_.size();
|
|
backends_.emplace_back();
|
|
RegisteredBackend& rb = backends_.back();
|
|
rb.backend = backend;
|
|
rb.id = backend_id;
|
|
rb.type = type;
|
|
rb.producer.reset(new ProducerImpl(this, backend_id,
|
|
args.shmem_batch_commits_duration_ms));
|
|
TracingBackend::ConnectProducerArgs conn_args;
|
|
conn_args.producer = rb.producer.get();
|
|
conn_args.producer_name = platform_->GetCurrentProcessName();
|
|
conn_args.task_runner = task_runner_.get();
|
|
conn_args.shmem_size_hint_bytes = args.shmem_size_hint_kb * 1024;
|
|
conn_args.shmem_page_size_hint_bytes = args.shmem_page_size_hint_kb * 1024;
|
|
rb.producer->Initialize(rb.backend->ConnectProducer(conn_args));
|
|
};
|
|
|
|
if (args.backends & kSystemBackend) {
|
|
PERFETTO_CHECK(args.system_backend_factory_);
|
|
add_backend(args.system_backend_factory_(), kSystemBackend);
|
|
}
|
|
|
|
if (args.backends & kInProcessBackend) {
|
|
PERFETTO_CHECK(args.in_process_backend_factory_);
|
|
add_backend(args.in_process_backend_factory_(), kInProcessBackend);
|
|
}
|
|
|
|
if (args.backends & kCustomBackend) {
|
|
PERFETTO_CHECK(args.custom_backend);
|
|
add_backend(args.custom_backend, kCustomBackend);
|
|
}
|
|
|
|
if (args.backends & ~(kSystemBackend | kInProcessBackend | kCustomBackend)) {
|
|
PERFETTO_FATAL("Unsupported tracing backend type");
|
|
}
|
|
}
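
// Illustrative sketch (not part of this file): an embedder selects the
// backend(s) through TracingInitArgs before any of the above runs, e.g.:
//
//   perfetto::TracingInitArgs args;
//   args.backends = perfetto::kInProcessBackend;  // or kSystemBackend, ...
//   perfetto::Tracing::Initialize(args);  // Ends up in InitializeInstance().
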
// Can be called from any thread (but not concurrently).
|
|
bool TracingMuxerImpl::RegisterDataSource(
|
|
const DataSourceDescriptor& descriptor,
|
|
DataSourceFactory factory,
|
|
DataSourceStaticState* static_state) {
|
|
// Ignore repeated registrations.
|
|
if (static_state->index != kMaxDataSources)
|
|
return true;
|
|
|
|
static std::atomic<uint32_t> last_id{};
|
|
uint32_t new_index = last_id++;
|
|
if (new_index >= kMaxDataSources) {
|
|
PERFETTO_DLOG(
|
|
"RegisterDataSource failed: too many data sources already registered");
|
|
return false;
|
|
}
|
|
|
|
// Initialize the static state.
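// |instances| is raw storage: a DataSourceState object is constructed in each
// slot via placement new below before any of them can be used.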
static_assert(sizeof(static_state->instances[0]) >= sizeof(DataSourceState),
|
|
"instances[] size mismatch");
|
|
for (size_t i = 0; i < static_state->instances.size(); i++)
|
|
new (&static_state->instances[i]) DataSourceState{};
|
|
|
|
static_state->index = new_index;
|
|
|
|
task_runner_->PostTask([this, descriptor, factory, static_state] {
|
|
data_sources_.emplace_back();
|
|
RegisteredDataSource& rds = data_sources_.back();
|
|
rds.descriptor = descriptor;
|
|
rds.factory = factory;
|
|
rds.static_state = static_state;
|
|
UpdateDataSourcesOnAllBackends();
|
|
});
|
|
return true;
|
|
}
|
|
|
|
// Called by the service of one of the backends.
|
|
void TracingMuxerImpl::SetupDataSource(TracingBackendId backend_id,
|
|
DataSourceInstanceID instance_id,
|
|
const DataSourceConfig& cfg) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Setting up data source %" PRIu64 " %s", instance_id,
|
|
cfg.name().c_str());
|
|
uint64_t config_hash = ComputeConfigHash(cfg);
|
|
|
|
for (const auto& rds : data_sources_) {
|
|
if (rds.descriptor.name() != cfg.name())
|
|
continue;
|
|
DataSourceStaticState& static_state = *rds.static_state;
|
|
|
|
// If this data source is already active for this exact config, don't start
|
|
// another instance. This happens when we have several data sources with the
|
|
// same name, in which case the service sends one SetupDataSource event for
|
|
// each one. Since we can't map which event maps to which data source, we
|
|
// ensure each event only starts one data source instance.
|
|
// TODO(skyostil): Register a unique id with each data source to the service
|
|
// to disambiguate.
|
|
bool active_for_config = false;
|
|
for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
|
|
if (!static_state.TryGet(i))
|
|
continue;
|
|
auto* internal_state =
|
|
reinterpret_cast<DataSourceState*>(&static_state.instances[i]);
|
|
if (internal_state->backend_id == backend_id &&
|
|
internal_state->config_hash == config_hash) {
|
|
active_for_config = true;
|
|
break;
|
|
}
|
|
}
|
|
if (active_for_config) {
|
|
PERFETTO_DLOG(
|
|
"Data source %s is already active with this config, skipping",
|
|
cfg.name().c_str());
|
|
continue;
|
|
}
|
|
|
|
for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
|
|
// Find a free slot.
|
|
if (static_state.TryGet(i))
|
|
continue;
|
|
|
|
auto* internal_state =
|
|
reinterpret_cast<DataSourceState*>(&static_state.instances[i]);
|
|
std::lock_guard<std::recursive_mutex> guard(internal_state->lock);
|
|
static_assert(
|
|
std::is_same<decltype(internal_state->data_source_instance_id),
|
|
DataSourceInstanceID>::value,
|
|
"data_source_instance_id type mismatch");
|
|
internal_state->backend_id = backend_id;
|
|
internal_state->data_source_instance_id = instance_id;
|
|
internal_state->buffer_id =
|
|
static_cast<internal::BufferId>(cfg.target_buffer());
|
|
internal_state->config_hash = config_hash;
|
|
internal_state->data_source = rds.factory();
|
|
|
|
// This must be done at the end. See the matching acquire-load in
|
|
// DataSource::Trace().
|
|
static_state.valid_instances.fetch_or(1 << i, std::memory_order_release);
|
|
|
|
DataSourceBase::SetupArgs setup_args;
|
|
setup_args.config = &cfg;
|
|
setup_args.internal_instance_index = i;
|
|
internal_state->data_source->OnSetup(setup_args);
|
|
return;
|
|
}
|
|
PERFETTO_ELOG(
|
|
"Maximum number of data source instances exhausted. "
|
|
"Dropping data source %" PRIu64,
|
|
instance_id);
|
|
break;
|
|
}
|
|
}
|
|
|
|
// Called by the service of one of the backends.
|
|
void TracingMuxerImpl::StartDataSource(TracingBackendId backend_id,
|
|
DataSourceInstanceID instance_id) {
|
|
PERFETTO_DLOG("Starting data source %" PRIu64, instance_id);
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
|
|
auto ds = FindDataSource(backend_id, instance_id);
|
|
if (!ds) {
|
|
PERFETTO_ELOG("Could not find data source to start");
|
|
return;
|
|
}
|
|
|
|
DataSourceBase::StartArgs start_args{};
|
|
start_args.internal_instance_index = ds.instance_idx;
|
|
|
|
std::lock_guard<std::recursive_mutex> guard(ds.internal_state->lock);
|
|
ds.internal_state->trace_lambda_enabled = true;
|
|
ds.internal_state->data_source->OnStart(start_args);
|
|
}
|
|
|
|
// Called by the service of one of the backends.
|
|
void TracingMuxerImpl::StopDataSource_AsyncBegin(
|
|
TracingBackendId backend_id,
|
|
DataSourceInstanceID instance_id) {
|
|
PERFETTO_DLOG("Stopping data source %" PRIu64, instance_id);
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
|
|
auto ds = FindDataSource(backend_id, instance_id);
|
|
if (!ds) {
|
|
PERFETTO_ELOG("Could not find data source to stop");
|
|
return;
|
|
}
|
|
|
|
StopArgsImpl stop_args{};
|
|
stop_args.internal_instance_index = ds.instance_idx;
|
|
stop_args.async_stop_closure = [this, backend_id, instance_id] {
|
|
// TracingMuxerImpl is long lived, capturing |this| is okay.
|
|
// The notification closure can be moved out of the StopArgs by the
|
|
// embedder to handle stop asynchronously. The embedder might then
|
|
// call the closure on a different thread than the current one, hence
|
|
// this nested PostTask().
|
|
task_runner_->PostTask([this, backend_id, instance_id] {
|
|
StopDataSource_AsyncEnd(backend_id, instance_id);
|
|
});
|
|
};
|
|
|
|
{
|
|
std::lock_guard<std::recursive_mutex> guard(ds.internal_state->lock);
|
|
ds.internal_state->data_source->OnStop(stop_args);
|
|
}
|
|
|
|
// If the embedder hasn't called StopArgs::HandleStopAsynchronously(), run the
// async closure here. In theory we could avoid the PostTask and call straight
// into StopDataSource_AsyncEnd(). We keep the PostTask to reduce divergence
// between the deferred-stop and non-deferred-stop code paths.
|
|
if (stop_args.async_stop_closure)
|
|
std::move(stop_args.async_stop_closure)();
|
|
}
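
// Illustrative sketch (not part of this file): a data source implementation
// can defer its stop completion by taking ownership of the closure handed to
// OnStop(). MyDataSource and FlushOnBackgroundThread() are hypothetical:
//
//   void MyDataSource::OnStop(const StopArgs& args) {
//     // Tracing is not considered stopped until |stop_done| runs.
//     std::function<void()> stop_done = args.HandleStopAsynchronously();
//     FlushOnBackgroundThread([stop_done] { stop_done(); });
//   }
//
// The closure may be invoked from any thread; the async_stop_closure above
// re-posts StopDataSource_AsyncEnd() onto the muxer's own task runner.
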
void TracingMuxerImpl::StopDataSource_AsyncEnd(
|
|
TracingBackendId backend_id,
|
|
DataSourceInstanceID instance_id) {
|
|
PERFETTO_DLOG("Ending async stop of data source %" PRIu64, instance_id);
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
|
|
auto ds = FindDataSource(backend_id, instance_id);
|
|
if (!ds) {
|
|
PERFETTO_ELOG(
|
|
"Async stop of data source %" PRIu64
|
|
" failed. This might be due to calling the async_stop_closure twice.",
|
|
instance_id);
|
|
return;
|
|
}
|
|
|
|
const uint32_t mask = ~(1 << ds.instance_idx);
|
|
ds.static_state->valid_instances.fetch_and(mask, std::memory_order_acq_rel);
|
|
|
|
// Take the mutex to avoid destroying the data source while it is in the
// middle of a Trace() execution that called GetDataSourceLocked().
|
|
{
|
|
std::lock_guard<std::recursive_mutex> guard(ds.internal_state->lock);
|
|
ds.internal_state->trace_lambda_enabled = false;
|
|
ds.internal_state->data_source.reset();
|
|
}
|
|
|
|
// The other fields of internal_state are deliberately *not* cleared.
|
|
// See races-related comments of DataSource::Trace().
|
|
|
|
TracingMuxer::generation_++;
|
|
|
|
// |backends_| is append-only, Backend instances are always valid.
|
|
PERFETTO_CHECK(backend_id < backends_.size());
|
|
ProducerImpl* producer = backends_[backend_id].producer.get();
|
|
if (producer && producer->connected_) {
|
|
// Flush any commits that might have been batched by SharedMemoryArbiter.
|
|
producer->service_->MaybeSharedMemoryArbiter()
|
|
->FlushPendingCommitDataRequests();
|
|
producer->service_->NotifyDataSourceStopped(instance_id);
|
|
}
|
|
}
|
|
|
|
void TracingMuxerImpl::DestroyStoppedTraceWritersForCurrentThread() {
|
|
// Iterate across all possible data source types.
|
|
auto cur_generation = generation_.load(std::memory_order_acquire);
|
|
auto* root_tls = GetOrCreateTracingTLS();
|
|
|
|
auto destroy_stopped_instances = [](DataSourceThreadLocalState& tls) {
|
|
// |tls| has a vector of per-data-source-instance thread-local state.
|
|
DataSourceStaticState* static_state = tls.static_state;
|
|
if (!static_state)
|
|
return; // Slot not used.
|
|
|
|
// Iterate across all possible instances for this data source.
|
|
for (uint32_t inst = 0; inst < kMaxDataSourceInstances; inst++) {
|
|
DataSourceInstanceThreadLocalState& ds_tls = tls.per_instance[inst];
|
|
if (!ds_tls.trace_writer)
|
|
continue;
|
|
|
|
DataSourceState* ds_state = static_state->TryGet(inst);
|
|
if (ds_state && ds_state->backend_id == ds_tls.backend_id &&
|
|
ds_state->buffer_id == ds_tls.buffer_id) {
|
|
continue;
|
|
}
|
|
|
|
// The DataSource instance has been destroyed or recycled.
|
|
ds_tls.Reset(); // Will also destroy the |ds_tls.trace_writer|.
|
|
}
|
|
};
|
|
|
|
for (size_t ds_idx = 0; ds_idx < kMaxDataSources; ds_idx++) {
|
|
// |tls| has a vector of per-data-source-instance thread-local state.
|
|
DataSourceThreadLocalState& tls = root_tls->data_sources_tls[ds_idx];
|
|
destroy_stopped_instances(tls);
|
|
}
|
|
destroy_stopped_instances(root_tls->track_event_tls);
|
|
root_tls->generation = cur_generation;
|
|
}
|
|
|
|
// Called both when a new data source is registered and when a new backend
// connects. In both cases we want to make sure that the data source
// registrations are reflected on all the backends.
|
|
void TracingMuxerImpl::UpdateDataSourcesOnAllBackends() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
for (RegisteredDataSource& rds : data_sources_) {
|
|
for (RegisteredBackend& backend : backends_) {
|
|
// We cannot call RegisterDataSource on the backend before it connects.
|
|
if (!backend.producer->connected_)
|
|
continue;
|
|
|
|
PERFETTO_DCHECK(rds.static_state->index < kMaxDataSourceInstances);
|
|
if (backend.producer->registered_data_sources_.test(
|
|
rds.static_state->index))
|
|
continue;
|
|
|
|
rds.descriptor.set_will_notify_on_start(true);
|
|
rds.descriptor.set_will_notify_on_stop(true);
|
|
backend.producer->service_->RegisterDataSource(rds.descriptor);
|
|
backend.producer->registered_data_sources_.set(rds.static_state->index);
|
|
}
|
|
}
|
|
}
|
|
|
|
void TracingMuxerImpl::SetupTracingSession(
|
|
TracingSessionGlobalID session_id,
|
|
const std::shared_ptr<TraceConfig>& trace_config,
|
|
base::ScopedFile trace_fd) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_CHECK(!trace_fd || trace_config->write_into_file());
|
|
|
|
auto* consumer = FindConsumer(session_id);
|
|
if (!consumer)
|
|
return;
|
|
|
|
consumer->trace_config_ = trace_config;
|
|
if (trace_fd)
|
|
consumer->trace_fd_ = std::move(trace_fd);
|
|
|
|
if (!consumer->connected_)
|
|
return;
|
|
|
|
// Only used in the deferred start mode.
|
|
if (trace_config->deferred_start()) {
|
|
consumer->service_->EnableTracing(*trace_config,
|
|
std::move(consumer->trace_fd_));
|
|
}
|
|
}
|
|
|
|
void TracingMuxerImpl::StartTracingSession(TracingSessionGlobalID session_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
|
|
auto* consumer = FindConsumer(session_id);
|
|
|
|
if (!consumer)
|
|
return;
|
|
|
|
if (!consumer->trace_config_) {
|
|
PERFETTO_ELOG("Must call Setup(config) first");
|
|
return;
|
|
}
|
|
|
|
if (!consumer->connected_) {
|
|
consumer->start_pending_ = true;
|
|
return;
|
|
}
|
|
|
|
consumer->start_pending_ = false;
|
|
if (consumer->trace_config_->deferred_start()) {
|
|
consumer->service_->StartTracing();
|
|
} else {
|
|
consumer->service_->EnableTracing(*consumer->trace_config_,
|
|
std::move(consumer->trace_fd_));
|
|
}
|
|
|
|
// TODO implement support for the deferred-start + fast-triggering case.
|
|
}
|
|
|
|
void TracingMuxerImpl::StopTracingSession(TracingSessionGlobalID session_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto* consumer = FindConsumer(session_id);
|
|
if (!consumer)
|
|
return;
|
|
|
|
if (consumer->start_pending_) {
|
|
// If the session hasn't started yet, wait until it does before stopping.
|
|
consumer->stop_pending_ = true;
|
|
return;
|
|
}
|
|
|
|
consumer->stop_pending_ = false;
|
|
if (consumer->stopped_) {
|
|
// If the session was already stopped (e.g., it failed to start), don't try
|
|
// stopping again.
|
|
consumer->NotifyStopComplete();
|
|
} else if (!consumer->trace_config_) {
|
|
PERFETTO_ELOG("Must call Setup(config) and Start() first");
|
|
return;
|
|
} else {
|
|
consumer->service_->DisableTracing();
|
|
}
|
|
|
|
consumer->trace_config_.reset();
|
|
}
|
|
|
|
void TracingMuxerImpl::DestroyTracingSession(
|
|
TracingSessionGlobalID session_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
for (RegisteredBackend& backend : backends_) {
|
|
// We need to find the consumer (if any) and call Disconnect() as we destroy
// the tracing session. We can't call Disconnect() inside this for loop
// because, in the in-process case, it would end up in a synchronous call to
// OnConsumerDisconnected() which would invalidate the iterators over
// |backend.consumers|.
|
|
ConsumerImpl* consumer = nullptr;
|
|
for (auto& con : backend.consumers) {
|
|
if (con->session_id_ == session_id) {
|
|
consumer = con.get();
|
|
break;
|
|
}
|
|
}
|
|
if (consumer) {
|
|
// We broke out of the loop above on the assumption that each backend will
|
|
// only have a single consumer per session. This DCHECK ensures that
|
|
// this is the case.
|
|
PERFETTO_DCHECK(
|
|
std::count_if(backend.consumers.begin(), backend.consumers.end(),
|
|
[session_id](const std::unique_ptr<ConsumerImpl>& con) {
|
|
return con->session_id_ == session_id;
|
|
}) == 1u);
|
|
consumer->Disconnect();
|
|
}
|
|
}
|
|
}
|
|
|
|
void TracingMuxerImpl::ReadTracingSessionData(
|
|
TracingSessionGlobalID session_id,
|
|
std::function<void(TracingSession::ReadTraceCallbackArgs)> callback) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto* consumer = FindConsumer(session_id);
|
|
if (!consumer)
|
|
return;
|
|
PERFETTO_DCHECK(!consumer->read_trace_callback_);
|
|
consumer->read_trace_callback_ = std::move(callback);
|
|
consumer->service_->ReadBuffers();
|
|
}
|
|
|
|
void TracingMuxerImpl::GetTraceStats(
|
|
TracingSessionGlobalID session_id,
|
|
TracingSession::GetTraceStatsCallback callback) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto* consumer = FindConsumer(session_id);
|
|
if (!consumer) {
|
|
TracingSession::GetTraceStatsCallbackArgs callback_arg{};
|
|
callback_arg.success = false;
|
|
callback(std::move(callback_arg));
|
|
return;
|
|
}
|
|
PERFETTO_DCHECK(!consumer->get_trace_stats_callback_);
|
|
consumer->get_trace_stats_callback_ = std::move(callback);
|
|
if (!consumer->connected_) {
|
|
consumer->get_trace_stats_pending_ = true;
|
|
return;
|
|
}
|
|
consumer->get_trace_stats_pending_ = false;
|
|
consumer->service_->GetTraceStats();
|
|
}
|
|
|
|
TracingMuxerImpl::ConsumerImpl* TracingMuxerImpl::FindConsumer(
|
|
TracingSessionGlobalID session_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
for (RegisteredBackend& backend : backends_) {
|
|
for (auto& consumer : backend.consumers) {
|
|
if (consumer->session_id_ == session_id) {
|
|
PERFETTO_DCHECK(consumer->service_);
|
|
return consumer.get();
|
|
}
|
|
}
|
|
}
|
|
return nullptr;
|
|
}
|
|
|
|
void TracingMuxerImpl::OnConsumerDisconnected(ConsumerImpl* consumer) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
for (RegisteredBackend& backend : backends_) {
|
|
auto pred = [consumer](const std::unique_ptr<ConsumerImpl>& con) {
|
|
return con.get() == consumer;
|
|
};
|
|
backend.consumers.erase(std::remove_if(backend.consumers.begin(),
|
|
backend.consumers.end(), pred),
|
|
backend.consumers.end());
|
|
}
|
|
}
|
|
|
|
TracingMuxerImpl::FindDataSourceRes TracingMuxerImpl::FindDataSource(
|
|
TracingBackendId backend_id,
|
|
DataSourceInstanceID instance_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
for (const auto& rds : data_sources_) {
|
|
DataSourceStaticState* static_state = rds.static_state;
|
|
for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
|
|
auto* internal_state = static_state->TryGet(i);
|
|
if (internal_state && internal_state->backend_id == backend_id &&
|
|
internal_state->data_source_instance_id == instance_id) {
|
|
return FindDataSourceRes(static_state, internal_state, i);
|
|
}
|
|
}
|
|
}
|
|
return FindDataSourceRes();
|
|
}
|
|
|
|
// Can be called from any thread.
|
|
std::unique_ptr<TraceWriterBase> TracingMuxerImpl::CreateTraceWriter(
|
|
DataSourceState* data_source,
|
|
BufferExhaustedPolicy buffer_exhausted_policy) {
|
|
ProducerImpl* producer = backends_[data_source->backend_id].producer.get();
|
|
return producer->service_->CreateTraceWriter(data_source->buffer_id,
|
|
buffer_exhausted_policy);
|
|
}
|
|
|
|
// This is called via the public API Tracing::NewTrace().
|
|
// Can be called from any thread.
|
|
std::unique_ptr<TracingSession> TracingMuxerImpl::CreateTracingSession(
|
|
BackendType backend_type) {
|
|
TracingSessionGlobalID session_id = ++next_tracing_session_id_;
|
|
|
|
// |backend_type| can only specify one backend, not an OR-ed mask.
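// ((x & (x - 1)) == 0 holds only if at most one bit of x is set, e.g.
// 0b0100 & 0b0011 == 0, whereas 0b0110 & 0b0101 == 0b0100 != 0.)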
PERFETTO_CHECK((backend_type & (backend_type - 1)) == 0);
|
|
|
|
// Capturing |this| is fine because the TracingMuxer is a leaky singleton.
|
|
task_runner_->PostTask([this, backend_type, session_id] {
|
|
for (RegisteredBackend& backend : backends_) {
|
|
if (backend_type && backend.type != backend_type)
|
|
continue;
|
|
|
|
backend.consumers.emplace_back(
|
|
new ConsumerImpl(this, backend.id, session_id));
|
|
auto& consumer = backend.consumers.back();
|
|
TracingBackend::ConnectConsumerArgs conn_args;
|
|
conn_args.consumer = consumer.get();
|
|
conn_args.task_runner = task_runner_.get();
|
|
consumer->Initialize(backend.backend->ConnectConsumer(conn_args));
|
|
return;
|
|
}
|
|
PERFETTO_ELOG(
|
|
"Cannot create tracing session, no tracing backend ready for type=%d",
|
|
backend_type);
|
|
});
|
|
|
|
return std::unique_ptr<TracingSession>(
|
|
new TracingSessionImpl(this, session_id));
|
|
}
|
|
|
|
void TracingMuxerImpl::InitializeInstance(const TracingInitArgs& args) {
|
|
if (instance_)
|
|
PERFETTO_FATAL("Tracing already initialized");
|
|
instance_ = new TracingMuxerImpl(args);
|
|
}
|
|
|
|
TracingMuxer::~TracingMuxer() = default;
|
|
|
|
static_assert(std::is_same<internal::BufferId, BufferID>::value,
|
|
"public's BufferId and tracing/core's BufferID diverged");
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/track_event_internal.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/track_event_descriptor.pbzero.h
|
|
// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
|
|
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace pbzero {
|
|
|
|
class TrackEventCategory;
|
|
|
|
class TrackEventDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
|
|
public:
|
|
TrackEventDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TrackEventDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TrackEventDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_available_categories() const { return at<1>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstBytes> available_categories() const { return GetRepeated<::protozero::ConstBytes>(1); }
|
|
};
|
|
|
|
class TrackEventDescriptor : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TrackEventDescriptor_Decoder;
|
|
enum : int32_t {
|
|
kAvailableCategoriesFieldNumber = 1,
|
|
};
|
|
template <typename T = TrackEventCategory> T* add_available_categories() {
|
|
return BeginNestedMessage<T>(1);
|
|
}
|
|
|
|
};
|
|
|
|
class TrackEventCategory_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
|
|
public:
|
|
TrackEventCategory_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TrackEventCategory_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TrackEventCategory_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_name() const { return at<1>().valid(); }
|
|
::protozero::ConstChars name() const { return at<1>().as_string(); }
|
|
bool has_description() const { return at<2>().valid(); }
|
|
::protozero::ConstChars description() const { return at<2>().as_string(); }
|
|
bool has_tags() const { return at<3>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstChars> tags() const { return GetRepeated<::protozero::ConstChars>(3); }
|
|
};
|
|
|
|
class TrackEventCategory : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TrackEventCategory_Decoder;
|
|
enum : int32_t {
|
|
kNameFieldNumber = 1,
|
|
kDescriptionFieldNumber = 2,
|
|
kTagsFieldNumber = 3,
|
|
};
|
|
void set_name(const std::string& value) {
|
|
AppendBytes(1, value.data(), value.size());
|
|
}
|
|
void set_name(const char* data, size_t size) {
|
|
AppendBytes(1, data, size);
|
|
}
|
|
void set_description(const std::string& value) {
|
|
AppendBytes(2, value.data(), value.size());
|
|
}
|
|
void set_description(const char* data, size_t size) {
|
|
AppendBytes(2, data, size);
|
|
}
|
|
void add_tags(const std::string& value) {
|
|
AppendBytes(3, value.data(), value.size());
|
|
}
|
|
void add_tags(const char* data, size_t size) {
|
|
AppendBytes(3, data, size);
|
|
}
|
|
};
|
|
|
|
} // Namespace.
|
|
} // Namespace.
|
|
} // Namespace.
|
|
#endif // Include guard.
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/trace_packet_defaults.pbzero.h
|
|
// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
|
|
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PACKET_DEFAULTS_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PACKET_DEFAULTS_PROTO_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace pbzero {
|
|
|
|
class TrackEventDefaults;
|
|
|
|
class TracePacketDefaults_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/58, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
TracePacketDefaults_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TracePacketDefaults_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TracePacketDefaults_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_timestamp_clock_id() const { return at<58>().valid(); }
|
|
uint32_t timestamp_clock_id() const { return at<58>().as_uint32(); }
|
|
bool has_track_event_defaults() const { return at<11>().valid(); }
|
|
::protozero::ConstBytes track_event_defaults() const { return at<11>().as_bytes(); }
|
|
};
|
|
|
|
class TracePacketDefaults : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TracePacketDefaults_Decoder;
|
|
enum : int32_t {
|
|
kTimestampClockIdFieldNumber = 58,
|
|
kTrackEventDefaultsFieldNumber = 11,
|
|
};
|
|
void set_timestamp_clock_id(uint32_t value) {
|
|
AppendVarInt(58, value);
|
|
}
|
|
template <typename T = TrackEventDefaults> T* set_track_event_defaults() {
|
|
return BeginNestedMessage<T>(11);
|
|
}
|
|
|
|
};
|
|
|
|
} // Namespace.
|
|
} // Namespace.
|
|
} // Namespace.
|
|
#endif // Include guard.
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/proc_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event_category_registry.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event_interned_data_index.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet_defaults.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
BaseTrackEventInternedDataIndex::~BaseTrackEventInternedDataIndex() = default;
|
|
|
|
namespace {
|
|
|
|
std::atomic<perfetto::base::PlatformThreadId> g_main_thread;
|
|
static constexpr const char kLegacySlowPrefix[] = "disabled-by-default-";
|
|
static constexpr const char kSlowTag[] = "slow";
|
|
static constexpr const char kDebugTag[] = "debug";
|
|
|
|
struct InternedEventCategory
|
|
: public TrackEventInternedDataIndex<
|
|
InternedEventCategory,
|
|
perfetto::protos::pbzero::InternedData::kEventCategoriesFieldNumber,
|
|
const char*,
|
|
SmallInternedDataTraits> {
|
|
static void Add(protos::pbzero::InternedData* interned_data,
|
|
size_t iid,
|
|
const char* value,
|
|
size_t length) {
|
|
auto category = interned_data->add_event_categories();
|
|
category->set_iid(iid);
|
|
category->set_name(value, length);
|
|
}
|
|
};
|
|
|
|
struct InternedEventName
|
|
: public TrackEventInternedDataIndex<
|
|
InternedEventName,
|
|
perfetto::protos::pbzero::InternedData::kEventNamesFieldNumber,
|
|
const char*,
|
|
SmallInternedDataTraits> {
|
|
static void Add(protos::pbzero::InternedData* interned_data,
|
|
size_t iid,
|
|
const char* value) {
|
|
auto name = interned_data->add_event_names();
|
|
name->set_iid(iid);
|
|
name->set_name(value);
|
|
}
|
|
};
|
|
|
|
struct InternedDebugAnnotationName
|
|
: public TrackEventInternedDataIndex<
|
|
InternedDebugAnnotationName,
|
|
perfetto::protos::pbzero::InternedData::
|
|
kDebugAnnotationNamesFieldNumber,
|
|
const char*,
|
|
SmallInternedDataTraits> {
|
|
static void Add(protos::pbzero::InternedData* interned_data,
|
|
size_t iid,
|
|
const char* value) {
|
|
auto name = interned_data->add_debug_annotation_names();
|
|
name->set_iid(iid);
|
|
name->set_name(value);
|
|
}
|
|
};
|
|
|
|
enum class MatchType { kExact, kPattern };
|
|
|
|
bool NameMatchesPattern(const std::string& pattern,
|
|
const std::string& name,
|
|
MatchType match_type) {
|
|
// To avoid pulling in all of std::regex, for now we only support a single "*"
|
|
// wildcard at the end of the pattern.
|
|
size_t i = pattern.find('*');
|
|
if (i != std::string::npos) {
|
|
PERFETTO_DCHECK(i == pattern.size() - 1);
|
|
if (match_type != MatchType::kPattern)
|
|
return false;
|
|
return name.substr(0, i) == pattern.substr(0, i);
|
|
}
|
|
return name == pattern;
|
|
}
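// Illustrative examples of the matching rule above (a minimal sketch,
// assuming only the single trailing-"*" wildcard is supported):
//
//   NameMatchesPattern("cat*", "cat.extra", MatchType::kPattern)  -> true
//   NameMatchesPattern("cat*", "cat.extra", MatchType::kExact)    -> false
//   NameMatchesPattern("cat",  "cat",       MatchType::kExact)    -> true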
|
|
|
|
bool NameMatchesPatternList(const std::vector<std::string>& patterns,
|
|
const std::string& name,
|
|
MatchType match_type) {
|
|
for (const auto& pattern : patterns) {
|
|
if (NameMatchesPattern(pattern, name, match_type))
|
|
return true;
|
|
}
|
|
return false;
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// static
|
|
bool TrackEventInternal::Initialize(
|
|
const TrackEventCategoryRegistry& registry,
|
|
bool (*register_data_source)(const DataSourceDescriptor&)) {
|
|
if (!g_main_thread)
|
|
g_main_thread = perfetto::base::GetThreadId();
|
|
|
|
DataSourceDescriptor dsd;
|
|
dsd.set_name("track_event");
|
|
|
|
protozero::HeapBuffered<protos::pbzero::TrackEventDescriptor> ted;
|
|
for (size_t i = 0; i < registry.category_count(); i++) {
|
|
auto category = registry.GetCategory(i);
|
|
// Don't register group categories.
|
|
if (category->IsGroup())
|
|
continue;
|
|
auto cat = ted->add_available_categories();
|
|
cat->set_name(category->name);
|
|
if (category->description)
|
|
cat->set_description(category->description);
|
|
for (const auto& tag : category->tags) {
|
|
if (tag)
|
|
cat->add_tags(tag);
|
|
}
|
|
// Disabled-by-default categories get a "slow" tag.
|
|
if (!strncmp(category->name, kLegacySlowPrefix, strlen(kLegacySlowPrefix)))
|
|
cat->add_tags(kSlowTag);
|
|
}
|
|
dsd.set_track_event_descriptor_raw(ted.SerializeAsString());
|
|
|
|
return register_data_source(dsd);
|
|
}
|
|
|
|
// static
|
|
void TrackEventInternal::EnableTracing(
|
|
const TrackEventCategoryRegistry& registry,
|
|
const protos::gen::TrackEventConfig& config,
|
|
uint32_t instance_index) {
|
|
for (size_t i = 0; i < registry.category_count(); i++) {
|
|
if (IsCategoryEnabled(registry, config, *registry.GetCategory(i)))
|
|
registry.EnableCategoryForInstance(i, instance_index);
|
|
}
|
|
}
|
|
|
|
// static
|
|
void TrackEventInternal::DisableTracing(
|
|
const TrackEventCategoryRegistry& registry,
|
|
uint32_t instance_index) {
|
|
for (size_t i = 0; i < registry.category_count(); i++)
|
|
registry.DisableCategoryForInstance(i, instance_index);
|
|
}
|
|
|
|
// static
|
|
bool TrackEventInternal::IsCategoryEnabled(
|
|
const TrackEventCategoryRegistry& registry,
|
|
const protos::gen::TrackEventConfig& config,
|
|
const Category& category) {
|
|
// If this is a group category, check if any of its constituent categories are
|
|
// enabled. If so, then this one is enabled too.
|
|
if (category.IsGroup()) {
|
|
bool result = false;
|
|
category.ForEachGroupMember([&](const char* member_name, size_t name_size) {
|
|
for (size_t i = 0; i < registry.category_count(); i++) {
|
|
const auto ref_category = registry.GetCategory(i);
|
|
// Groups can't refer to other groups.
|
|
if (ref_category->IsGroup())
|
|
continue;
|
|
// Require an exact match.
|
|
if (ref_category->name_size() != name_size ||
|
|
strncmp(ref_category->name, member_name, name_size)) {
|
|
continue;
|
|
}
|
|
if (IsCategoryEnabled(registry, config, *ref_category)) {
|
|
result = true;
|
|
// Break ForEachGroupMember() loop.
|
|
return false;
|
|
}
|
|
break;
|
|
}
|
|
// No match found => keep iterating.
|
|
return true;
|
|
});
|
|
return result;
|
|
}
|
|
|
|
auto has_matching_tag = [&](std::function<bool(const char*)> matcher) {
|
|
for (const auto& tag : category.tags) {
|
|
if (!tag)
|
|
break;
|
|
if (matcher(tag))
|
|
return true;
|
|
}
|
|
// Legacy "disabled-by-default" categories automatically get the "slow" tag.
|
|
if (!strncmp(category.name, kLegacySlowPrefix, strlen(kLegacySlowPrefix)) &&
|
|
matcher(kSlowTag)) {
|
|
return true;
|
|
}
|
|
return false;
|
|
};
|
|
|
|
// First try exact matches, then pattern matches.
|
|
const std::array<MatchType, 2> match_types = {
|
|
{MatchType::kExact, MatchType::kPattern}};
|
|
for (auto match_type : match_types) {
|
|
// 1. Enabled categories.
|
|
if (NameMatchesPatternList(config.enabled_categories(), category.name,
|
|
match_type)) {
|
|
return true;
|
|
}
|
|
|
|
// 2. Enabled tags.
|
|
if (has_matching_tag([&](const char* tag) {
|
|
return NameMatchesPatternList(config.enabled_tags(), tag, match_type);
|
|
})) {
|
|
return true;
|
|
}
|
|
|
|
// 3. Disabled categories.
|
|
if (NameMatchesPatternList(config.disabled_categories(), category.name,
|
|
match_type)) {
|
|
return false;
|
|
}
|
|
|
|
// 4. Disabled tags.
|
|
if (has_matching_tag([&](const char* tag) {
|
|
if (config.disabled_tags_size()) {
|
|
return NameMatchesPatternList(config.disabled_tags(), tag,
|
|
match_type);
|
|
} else {
|
|
// The "slow" and "debug" tags are disabled by default.
|
|
return NameMatchesPattern(kSlowTag, tag, match_type) ||
|
|
NameMatchesPattern(kDebugTag, tag, match_type);
|
|
}
|
|
})) {
|
|
return false;
|
|
}
|
|
}
|
|
|
|
// If nothing matched, enable the category by default.
|
|
return true;
|
|
}
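// Worked example of the precedence above (an illustrative sketch using a
// hypothetical config): with enabled_categories = {"foo"} and
// disabled_categories = {"*"}, the category "foo" is enabled by the exact
// match in step 1, while any other category only matches the "*" entry in the
// pattern pass of step 3 and is therefore disabled. A category that matches
// no rule at all stays enabled by default, and the "slow"/"debug" tags are
// treated as disabled unless the config lists its own disabled tags.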
|
|
|
|
// static
|
|
uint64_t TrackEventInternal::GetTimeNs() {
|
|
if (GetClockId() == protos::pbzero::BUILTIN_CLOCK_BOOTTIME)
|
|
return static_cast<uint64_t>(perfetto::base::GetBootTimeNs().count());
|
|
PERFETTO_DCHECK(GetClockId() == protos::pbzero::BUILTIN_CLOCK_MONOTONIC);
|
|
return static_cast<uint64_t>(perfetto::base::GetWallTimeNs().count());
|
|
}
|
|
|
|
// static
|
|
void TrackEventInternal::ResetIncrementalState(TraceWriterBase* trace_writer,
|
|
uint64_t timestamp) {
|
|
auto default_track = ThreadTrack::Current();
|
|
{
|
|
// Mark any incremental state before this point invalid. Also set up
|
|
// defaults so that we don't need to repeat constant data for each packet.
|
|
auto packet = NewTracePacket(
|
|
trace_writer, timestamp,
|
|
protos::pbzero::TracePacket::SEQ_INCREMENTAL_STATE_CLEARED);
|
|
auto defaults = packet->set_trace_packet_defaults();
|
|
defaults->set_timestamp_clock_id(GetClockId());
|
|
|
|
// Establish the default track for this event sequence.
|
|
auto track_defaults = defaults->set_track_event_defaults();
|
|
track_defaults->set_track_uuid(default_track.uuid);
|
|
}
|
|
|
|
// Every thread should write a descriptor for its default track, because most
|
|
// trace points won't explicitly reference it.
|
|
WriteTrackDescriptor(default_track, trace_writer);
|
|
|
|
// Additionally the main thread should dump the process descriptor.
|
|
if (perfetto::base::GetThreadId() == g_main_thread)
|
|
WriteTrackDescriptor(ProcessTrack::Current(), trace_writer);
|
|
}
|
|
|
|
// static
|
|
protozero::MessageHandle<protos::pbzero::TracePacket>
|
|
TrackEventInternal::NewTracePacket(TraceWriterBase* trace_writer,
|
|
uint64_t timestamp,
|
|
uint32_t seq_flags) {
|
|
auto packet = trace_writer->NewTracePacket();
|
|
packet->set_timestamp(timestamp);
|
|
// TODO(skyostil): Stop emitting this for every event once the trace
|
|
// processor understands trace packet defaults.
|
|
if (GetClockId() != protos::pbzero::BUILTIN_CLOCK_BOOTTIME)
|
|
packet->set_timestamp_clock_id(GetClockId());
|
|
packet->set_sequence_flags(seq_flags);
|
|
return packet;
|
|
}
|
|
|
|
// static
|
|
EventContext TrackEventInternal::WriteEvent(
|
|
TraceWriterBase* trace_writer,
|
|
TrackEventIncrementalState* incr_state,
|
|
const Category* category,
|
|
const char* name,
|
|
perfetto::protos::pbzero::TrackEvent::Type type,
|
|
uint64_t timestamp) {
|
|
PERFETTO_DCHECK(g_main_thread);
|
|
|
|
if (incr_state->was_cleared) {
|
|
incr_state->was_cleared = false;
|
|
ResetIncrementalState(trace_writer, timestamp);
|
|
}
|
|
auto packet = NewTracePacket(trace_writer, timestamp);
|
|
EventContext ctx(std::move(packet), incr_state);
|
|
|
|
auto track_event = ctx.event();
|
|
if (type != protos::pbzero::TrackEvent::TYPE_UNSPECIFIED)
|
|
track_event->set_type(type);
|
|
|
|
// We assume that |category| and |name| point to strings with static lifetime.
|
|
// This means we can use their addresses as interning keys.
|
|
if (category && type != protos::pbzero::TrackEvent::TYPE_SLICE_END) {
|
|
category->ForEachGroupMember(
|
|
[&](const char* member_name, size_t name_size) {
|
|
size_t category_iid =
|
|
InternedEventCategory::Get(&ctx, member_name, name_size);
|
|
track_event->add_category_iids(category_iid);
|
|
return true;
|
|
});
|
|
}
|
|
if (name) {
|
|
size_t name_iid = InternedEventName::Get(&ctx, name);
|
|
track_event->set_name_iid(name_iid);
|
|
}
|
|
return ctx;
|
|
}
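// Note on the interning above (a descriptive sketch based on the Add()
// helpers earlier in this file): the first time a given static category or
// event name pointer is seen on a sequence, InternedEventCategory /
// InternedEventName emit an interned_data entry mapping a small iid to the
// string; subsequent events on the same sequence only reference that iid via
// category_iids / name_iid instead of repeating the string.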
|
|
|
|
// static
|
|
protos::pbzero::DebugAnnotation* TrackEventInternal::AddDebugAnnotation(
|
|
perfetto::EventContext* event_ctx,
|
|
const char* name) {
|
|
auto annotation = event_ctx->event()->add_debug_annotations();
|
|
annotation->set_name_iid(InternedDebugAnnotationName::Get(event_ctx, name));
|
|
return annotation;
|
|
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/platform.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/platform.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_tls.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
|
|
|
|
namespace perfetto {
|
|
|
|
PlatformThreadLocalObject::~PlatformThreadLocalObject() = default;
|
|
Platform::~Platform() = default;
|
|
|
|
// static
|
|
std::unique_ptr<PlatformThreadLocalObject>
|
|
PlatformThreadLocalObject::CreateInstance() {
|
|
return std::unique_ptr<PlatformThreadLocalObject>(new internal::TracingTLS());
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/tracing.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/internal/tracing_muxer_impl.h"
|
|
|
|
#include <condition_variable>
|
|
#include <mutex>
|
|
|
|
namespace perfetto {
|
|
|
|
// static
|
|
void Tracing::InitializeInternal(const TracingInitArgs& args) {
|
|
static bool was_initialized = false;
|
|
static TracingInitArgs init_args;
|
|
if (was_initialized) {
|
|
// Should not be reinitialized with different args.
|
|
PERFETTO_DCHECK(init_args == args);
|
|
return;
|
|
}
|
|
|
|
// Make sure the headers and implementation files agree on the build config.
|
|
PERFETTO_CHECK(args.dcheck_is_on_ == PERFETTO_DCHECK_IS_ON());
|
|
internal::TracingMuxerImpl::InitializeInstance(args);
|
|
internal::TrackRegistry::InitializeInstance();
|
|
was_initialized = true;
|
|
init_args = args;
|
|
}
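// Illustrative initialization sketch (an assumption: it uses the public
// Tracing::Initialize() wrapper and TracingInitArgs fields declared in
// perfetto/tracing/tracing.h, which forward to InitializeInternal() above):
//
//   perfetto::TracingInitArgs args;
//   args.backends = perfetto::kInProcessBackend;
//   perfetto::Tracing::Initialize(args);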
|
|
|
|
// static
|
|
std::unique_ptr<TracingSession> Tracing::NewTrace(BackendType backend) {
|
|
return static_cast<internal::TracingMuxerImpl*>(internal::TracingMuxer::Get())
|
|
->CreateTracingSession(backend);
|
|
}
|
|
|
|
std::vector<char> TracingSession::ReadTraceBlocking() {
|
|
std::vector<char> raw_trace;
|
|
std::mutex mutex;
|
|
std::condition_variable cv;
|
|
bool all_read = false;
|
|
|
|
ReadTrace([&mutex, &raw_trace, &all_read, &cv](ReadTraceCallbackArgs cb) {
|
|
raw_trace.insert(raw_trace.end(), cb.data, cb.data + cb.size);
|
|
std::unique_lock<std::mutex> lock(mutex);
|
|
all_read = !cb.has_more;
|
|
if (all_read)
|
|
cv.notify_one();
|
|
});
|
|
|
|
{
|
|
std::unique_lock<std::mutex> lock(mutex);
|
|
cv.wait(lock, [&all_read] { return all_read; });
|
|
}
|
|
return raw_trace;
|
|
}
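// Illustrative usage sketch for the blocking read above (assumes a session
// that has already been configured, started and stopped elsewhere):
//
//   std::unique_ptr<perfetto::TracingSession> session =
//       perfetto::Tracing::NewTrace(perfetto::kInProcessBackend);
//   // ... Setup(cfg), StartBlocking(), emit trace events, StopBlocking() ...
//   std::vector<char> raw_trace = session->ReadTraceBlocking();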
|
|
|
|
TracingSession::GetTraceStatsCallbackArgs
|
|
TracingSession::GetTraceStatsBlocking() {
|
|
std::mutex mutex;
|
|
std::condition_variable cv;
|
|
GetTraceStatsCallbackArgs result;
|
|
bool stats_read = false;
|
|
|
|
GetTraceStats(
|
|
[&mutex, &result, &stats_read, &cv](GetTraceStatsCallbackArgs args) {
|
|
result = std::move(args);
|
|
std::unique_lock<std::mutex> lock(mutex);
|
|
stats_read = true;
|
|
cv.notify_one();
|
|
});
|
|
|
|
{
|
|
std::unique_lock<std::mutex> lock(mutex);
|
|
cv.wait(lock, [&stats_read] { return stats_read; });
|
|
}
|
|
return result;
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/track.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/process_descriptor.pbzero.h
|
|
// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
|
|
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace pbzero {
|
|
|
|
enum ProcessDescriptor_ChromeProcessType : int32_t;
|
|
|
|
enum ProcessDescriptor_ChromeProcessType : int32_t {
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED = 0,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER = 1,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER = 2,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY = 3,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE = 4,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER = 5,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_GPU = 6,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN = 7,
|
|
ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER = 8,
|
|
};
|
|
|
|
const ProcessDescriptor_ChromeProcessType ProcessDescriptor_ChromeProcessType_MIN = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
|
|
const ProcessDescriptor_ChromeProcessType ProcessDescriptor_ChromeProcessType_MAX = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
|
|
|
|
class ProcessDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
|
|
public:
|
|
ProcessDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit ProcessDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit ProcessDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_pid() const { return at<1>().valid(); }
|
|
int32_t pid() const { return at<1>().as_int32(); }
|
|
bool has_cmdline() const { return at<2>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstChars> cmdline() const { return GetRepeated<::protozero::ConstChars>(2); }
|
|
bool has_process_name() const { return at<6>().valid(); }
|
|
::protozero::ConstChars process_name() const { return at<6>().as_string(); }
|
|
bool has_process_priority() const { return at<5>().valid(); }
|
|
int32_t process_priority() const { return at<5>().as_int32(); }
|
|
bool has_chrome_process_type() const { return at<4>().valid(); }
|
|
int32_t chrome_process_type() const { return at<4>().as_int32(); }
|
|
bool has_legacy_sort_index() const { return at<3>().valid(); }
|
|
int32_t legacy_sort_index() const { return at<3>().as_int32(); }
|
|
};
|
|
|
|
class ProcessDescriptor : public ::protozero::Message {
|
|
public:
|
|
using Decoder = ProcessDescriptor_Decoder;
|
|
enum : int32_t {
|
|
kPidFieldNumber = 1,
|
|
kCmdlineFieldNumber = 2,
|
|
kProcessNameFieldNumber = 6,
|
|
kProcessPriorityFieldNumber = 5,
|
|
kChromeProcessTypeFieldNumber = 4,
|
|
kLegacySortIndexFieldNumber = 3,
|
|
};
|
|
using ChromeProcessType = ::perfetto::protos::pbzero::ProcessDescriptor_ChromeProcessType;
|
|
static const ChromeProcessType PROCESS_UNSPECIFIED = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
|
|
static const ChromeProcessType PROCESS_BROWSER = ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER;
|
|
static const ChromeProcessType PROCESS_RENDERER = ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER;
|
|
static const ChromeProcessType PROCESS_UTILITY = ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY;
|
|
static const ChromeProcessType PROCESS_ZYGOTE = ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE;
|
|
static const ChromeProcessType PROCESS_SANDBOX_HELPER = ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER;
|
|
static const ChromeProcessType PROCESS_GPU = ProcessDescriptor_ChromeProcessType_PROCESS_GPU;
|
|
static const ChromeProcessType PROCESS_PPAPI_PLUGIN = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN;
|
|
static const ChromeProcessType PROCESS_PPAPI_BROKER = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
|
|
void set_pid(int32_t value) {
|
|
AppendVarInt(1, value);
|
|
}
|
|
void add_cmdline(const std::string& value) {
|
|
AppendBytes(2, value.data(), value.size());
|
|
}
|
|
void add_cmdline(const char* data, size_t size) {
|
|
AppendBytes(2, data, size);
|
|
}
|
|
void set_process_name(const std::string& value) {
|
|
AppendBytes(6, value.data(), value.size());
|
|
}
|
|
void set_process_name(const char* data, size_t size) {
|
|
AppendBytes(6, data, size);
|
|
}
|
|
void set_process_priority(int32_t value) {
|
|
AppendVarInt(5, value);
|
|
}
|
|
void set_chrome_process_type(::perfetto::protos::pbzero::ProcessDescriptor_ChromeProcessType value) {
|
|
AppendTinyVarInt(4, value);
|
|
}
|
|
void set_legacy_sort_index(int32_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
};
|
|
|
|
} // Namespace.
|
|
} // Namespace.
|
|
} // Namespace.
|
|
#endif // Include guard.
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/thread_descriptor.pbzero.h
|
|
// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
|
|
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace pbzero {
|
|
|
|
enum ThreadDescriptor_ChromeThreadType : int32_t;
|
|
|
|
enum ThreadDescriptor_ChromeThreadType : int32_t {
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED = 0,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN = 1,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO = 2,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER = 3,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER = 4,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING = 5,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING = 6,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE = 7,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR = 8,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR = 9,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER = 10,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER = 11,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA = 50,
|
|
ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER = 51,
|
|
};
|
|
|
|
const ThreadDescriptor_ChromeThreadType ThreadDescriptor_ChromeThreadType_MIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
|
|
const ThreadDescriptor_ChromeThreadType ThreadDescriptor_ChromeThreadType_MAX = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
|
|
|
|
class ThreadDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
ThreadDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit ThreadDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit ThreadDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_pid() const { return at<1>().valid(); }
|
|
int32_t pid() const { return at<1>().as_int32(); }
|
|
bool has_tid() const { return at<2>().valid(); }
|
|
int32_t tid() const { return at<2>().as_int32(); }
|
|
bool has_thread_name() const { return at<5>().valid(); }
|
|
::protozero::ConstChars thread_name() const { return at<5>().as_string(); }
|
|
bool has_chrome_thread_type() const { return at<4>().valid(); }
|
|
int32_t chrome_thread_type() const { return at<4>().as_int32(); }
|
|
bool has_reference_timestamp_us() const { return at<6>().valid(); }
|
|
int64_t reference_timestamp_us() const { return at<6>().as_int64(); }
|
|
bool has_reference_thread_time_us() const { return at<7>().valid(); }
|
|
int64_t reference_thread_time_us() const { return at<7>().as_int64(); }
|
|
bool has_reference_thread_instruction_count() const { return at<8>().valid(); }
|
|
int64_t reference_thread_instruction_count() const { return at<8>().as_int64(); }
|
|
bool has_legacy_sort_index() const { return at<3>().valid(); }
|
|
int32_t legacy_sort_index() const { return at<3>().as_int32(); }
|
|
};
|
|
|
|
class ThreadDescriptor : public ::protozero::Message {
|
|
public:
|
|
using Decoder = ThreadDescriptor_Decoder;
|
|
enum : int32_t {
|
|
kPidFieldNumber = 1,
|
|
kTidFieldNumber = 2,
|
|
kThreadNameFieldNumber = 5,
|
|
kChromeThreadTypeFieldNumber = 4,
|
|
kReferenceTimestampUsFieldNumber = 6,
|
|
kReferenceThreadTimeUsFieldNumber = 7,
|
|
kReferenceThreadInstructionCountFieldNumber = 8,
|
|
kLegacySortIndexFieldNumber = 3,
|
|
};
|
|
using ChromeThreadType = ::perfetto::protos::pbzero::ThreadDescriptor_ChromeThreadType;
|
|
static const ChromeThreadType CHROME_THREAD_UNSPECIFIED = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
|
|
static const ChromeThreadType CHROME_THREAD_MAIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN;
|
|
static const ChromeThreadType CHROME_THREAD_IO = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO;
|
|
static const ChromeThreadType CHROME_THREAD_POOL_BG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER;
|
|
static const ChromeThreadType CHROME_THREAD_POOL_FG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER;
|
|
static const ChromeThreadType CHROME_THREAD_POOL_FB_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING;
|
|
static const ChromeThreadType CHROME_THREAD_POOL_BG_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING;
|
|
static const ChromeThreadType CHROME_THREAD_POOL_SERVICE = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE;
|
|
static const ChromeThreadType CHROME_THREAD_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR;
|
|
static const ChromeThreadType CHROME_THREAD_VIZ_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR;
|
|
static const ChromeThreadType CHROME_THREAD_COMPOSITOR_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER;
|
|
static const ChromeThreadType CHROME_THREAD_SERVICE_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER;
|
|
static const ChromeThreadType CHROME_THREAD_MEMORY_INFRA = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA;
|
|
static const ChromeThreadType CHROME_THREAD_SAMPLING_PROFILER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
|
|
void set_pid(int32_t value) {
|
|
AppendVarInt(1, value);
|
|
}
|
|
void set_tid(int32_t value) {
|
|
AppendVarInt(2, value);
|
|
}
|
|
void set_thread_name(const std::string& value) {
|
|
AppendBytes(5, value.data(), value.size());
|
|
}
|
|
void set_thread_name(const char* data, size_t size) {
|
|
AppendBytes(5, data, size);
|
|
}
|
|
void set_chrome_thread_type(::perfetto::protos::pbzero::ThreadDescriptor_ChromeThreadType value) {
|
|
AppendTinyVarInt(4, value);
|
|
}
|
|
void set_reference_timestamp_us(int64_t value) {
|
|
AppendVarInt(6, value);
|
|
}
|
|
void set_reference_thread_time_us(int64_t value) {
|
|
AppendVarInt(7, value);
|
|
}
|
|
void set_reference_thread_instruction_count(int64_t value) {
|
|
AppendVarInt(8, value);
|
|
}
|
|
void set_legacy_sort_index(int32_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
};
|
|
|
|
} // Namespace.
|
|
} // Namespace.
|
|
} // Namespace.
|
|
#endif // Include guard.
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/uuid.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_data_source.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// static
|
|
uint64_t Track::process_uuid;
|
|
|
|
protos::gen::TrackDescriptor Track::Serialize() const {
|
|
protos::gen::TrackDescriptor desc;
|
|
desc.set_uuid(uuid);
|
|
if (parent_uuid)
|
|
desc.set_parent_uuid(parent_uuid);
|
|
return desc;
|
|
}
|
|
|
|
void Track::Serialize(protos::pbzero::TrackDescriptor* desc) const {
|
|
auto bytes = Serialize().SerializeAsString();
|
|
desc->AppendRawProtoBytes(bytes.data(), bytes.size());
|
|
}
|
|
|
|
protos::gen::TrackDescriptor ProcessTrack::Serialize() const {
|
|
auto desc = Track::Serialize();
|
|
auto pd = desc.mutable_process();
|
|
pd->set_pid(static_cast<int32_t>(pid));
|
|
// TODO(skyostil): Record command line.
|
|
return desc;
|
|
}
|
|
|
|
void ProcessTrack::Serialize(protos::pbzero::TrackDescriptor* desc) const {
|
|
auto bytes = Serialize().SerializeAsString();
|
|
desc->AppendRawProtoBytes(bytes.data(), bytes.size());
|
|
}
|
|
|
|
protos::gen::TrackDescriptor ThreadTrack::Serialize() const {
|
|
auto desc = Track::Serialize();
|
|
auto td = desc.mutable_thread();
|
|
td->set_pid(static_cast<int32_t>(pid));
|
|
td->set_tid(static_cast<int32_t>(tid));
|
|
// TODO(skyostil): Record thread name.
|
|
return desc;
|
|
}
|
|
|
|
void ThreadTrack::Serialize(protos::pbzero::TrackDescriptor* desc) const {
|
|
auto bytes = Serialize().SerializeAsString();
|
|
desc->AppendRawProtoBytes(bytes.data(), bytes.size());
|
|
}
|
|
|
|
namespace internal {
|
|
|
|
// static
|
|
TrackRegistry* TrackRegistry::instance_;
|
|
|
|
TrackRegistry::TrackRegistry() = default;
|
|
TrackRegistry::~TrackRegistry() = default;
|
|
|
|
// static
|
|
void TrackRegistry::InitializeInstance() {
|
|
// TODO(eseckler): Chrome may call this more than once. Once Chrome doesn't
|
|
// call this directly anymore, bring back DCHECK(!instance_) instead.
|
|
if (instance_)
|
|
return;
|
|
instance_ = new TrackRegistry();
|
|
Track::process_uuid = static_cast<uint64_t>(base::Uuidv4().lsb());
|
|
}
|
|
|
|
void TrackRegistry::UpdateTrack(Track track,
|
|
const std::string& serialized_desc) {
|
|
std::lock_guard<std::mutex> lock(mutex_);
|
|
tracks_[track.uuid] = std::move(serialized_desc);
|
|
}
|
|
|
|
void TrackRegistry::UpdateTrackImpl(
|
|
Track track,
|
|
std::function<void(protos::pbzero::TrackDescriptor*)> fill_function) {
|
|
constexpr size_t kInitialSliceSize = 32;
|
|
constexpr size_t kMaximumSliceSize = 4096;
|
|
protozero::HeapBuffered<protos::pbzero::TrackDescriptor> new_descriptor(
|
|
kInitialSliceSize, kMaximumSliceSize);
|
|
fill_function(new_descriptor.get());
|
|
auto serialized_desc = new_descriptor.SerializeAsString();
|
|
UpdateTrack(track, serialized_desc);
|
|
}
|
|
|
|
void TrackRegistry::EraseTrack(Track track) {
|
|
std::lock_guard<std::mutex> lock(mutex_);
|
|
tracks_.erase(track.uuid);
|
|
}
|
|
|
|
// static
|
|
void TrackRegistry::WriteTrackDescriptor(
|
|
const SerializedTrackDescriptor& desc,
|
|
protozero::MessageHandle<protos::pbzero::TracePacket> packet) {
|
|
packet->AppendString(
|
|
perfetto::protos::pbzero::TracePacket::kTrackDescriptorFieldNumber, desc);
|
|
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/track_event_category_registry.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event_category_registry.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// static
|
|
Category Category::FromDynamicCategory(const char* name) {
|
|
if (GetNthNameSize(1, name, name)) {
|
|
Category group(Group(name));
|
|
PERFETTO_DCHECK(group.name);
|
|
return group;
|
|
}
|
|
Category category(name);
|
|
PERFETTO_DCHECK(category.name);
|
|
return category;
|
|
}
|
|
|
|
Category Category::FromDynamicCategory(
|
|
const DynamicCategory& dynamic_category) {
|
|
return FromDynamicCategory(dynamic_category.name.c_str());
|
|
}
|
|
|
|
namespace internal {
|
|
|
|
perfetto::DynamicCategory NullCategory(const perfetto::DynamicCategory&) {
|
|
return perfetto::DynamicCategory{};
|
|
}
|
|
|
|
const Category* TrackEventCategoryRegistry::GetCategory(size_t index) const {
|
|
PERFETTO_DCHECK(index < category_count_);
|
|
return &categories_[index];
|
|
}
|
|
|
|
void TrackEventCategoryRegistry::EnableCategoryForInstance(
|
|
size_t category_index,
|
|
uint32_t instance_index) const {
|
|
PERFETTO_DCHECK(instance_index < kMaxDataSourceInstances);
|
|
PERFETTO_DCHECK(category_index < category_count_);
|
|
// Matches the acquire_load in DataSource::Trace().
|
|
state_storage_[category_index].fetch_or(
|
|
static_cast<uint8_t>(1u << instance_index), std::memory_order_release);
|
|
}
|
|
|
|
void TrackEventCategoryRegistry::DisableCategoryForInstance(
|
|
size_t category_index,
|
|
uint32_t instance_index) const {
|
|
PERFETTO_DCHECK(instance_index < kMaxDataSourceInstances);
|
|
PERFETTO_DCHECK(category_index < category_count_);
|
|
// Matches the acquire_load in DataSource::Trace().
|
|
state_storage_[category_index].fetch_and(
|
|
static_cast<uint8_t>(~(1u << instance_index)), std::memory_order_release);
|
|
}
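// Illustrative note on the bitmask above (a sketch of the existing logic):
// each category keeps one byte of state in which bit N corresponds to data
// source instance N. For example, enabling instance 2 ORs in 0b100 and
// disabling it ANDs with ~0b100, leaving the bits of the other instances
// untouched.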
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/track_event_legacy.cc
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event_legacy.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track.h"
|
|
|
|
namespace perfetto {
|
|
namespace legacy {
|
|
|
|
template <>
|
|
bool ConvertThreadId(const PerfettoLegacyCurrentThreadId&,
|
|
uint64_t*,
|
|
int32_t*,
|
|
int32_t*) {
|
|
  // No need to override anything for events on the current thread.
|
|
return false;
|
|
}
|
|
|
|
} // namespace legacy
|
|
|
|
namespace internal {
|
|
|
|
void LegacyTraceId::Write(protos::pbzero::TrackEvent::LegacyEvent* event,
|
|
uint32_t event_flags) const {
|
|
// Legacy flow events always use bind_id.
|
|
if (event_flags &
|
|
(legacy::kTraceEventFlagFlowOut | legacy::kTraceEventFlagFlowIn)) {
|
|
// Flow bind_ids don't have scopes, so we need to mangle in-process ones to
|
|
// avoid collisions.
|
|
if (id_flags_ & legacy::kTraceEventFlagHasLocalId) {
|
|
event->set_bind_id(raw_id_ ^ ProcessTrack::Current().uuid);
|
|
} else {
|
|
event->set_bind_id(raw_id_);
|
|
}
|
|
return;
|
|
}
|
|
|
|
uint32_t scope_flags = id_flags_ & (legacy::kTraceEventFlagHasId |
|
|
legacy::kTraceEventFlagHasLocalId |
|
|
legacy::kTraceEventFlagHasGlobalId);
|
|
switch (scope_flags) {
|
|
case legacy::kTraceEventFlagHasId:
|
|
event->set_unscoped_id(raw_id_);
|
|
break;
|
|
case legacy::kTraceEventFlagHasLocalId:
|
|
event->set_local_id(raw_id_);
|
|
break;
|
|
case legacy::kTraceEventFlagHasGlobalId:
|
|
event->set_global_id(raw_id_);
|
|
break;
|
|
}
|
|
if (scope_)
|
|
event->set_id_scope(scope_);
|
|
}
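// Illustrative note on the mangling above (a sketch, not new behaviour):
// XOR-ing a local bind_id with the per-process track uuid keeps flow ids from
// different processes from colliding. For example, the same local id 0x1234
// emitted by process A and by process B maps to two different global
// bind_ids, because the two processes have different ProcessTrack uuids.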
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/virtual_destructors.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_tls.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
|
|
|
|
// This translation unit contains the definitions for the destructors of pure
// virtual interfaces for the src/public:public target. The alternative would
// be introducing a one-liner .cc file for each pure virtual interface, which
// is overkill. This is for compliance with -Wweak-vtables.
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
TracingTLS::~TracingTLS() = default;
|
|
|
|
} // namespace internal
|
|
|
|
TracingBackend::~TracingBackend() = default;
|
|
TracingSession::~TracingSession() = default;
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/metatrace_writer.cc
|
|
// gen_amalgamated begin header: src/tracing/core/metatrace_writer.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_METATRACE_WRITER_H_
|
|
#define SRC_TRACING_CORE_METATRACE_WRITER_H_
|
|
|
|
#include <functional>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
}
|
|
|
|
class TraceWriter;
|
|
|
|
// Complements the base::metatrace infrastructure.
|
|
// It hooks a callback to metatrace::Enable() and writes metatrace events into
|
|
// a TraceWriter whenever the metatrace ring buffer is half full.
|
|
// It is safe to create and attempt to start multiple instances of this class;
// however, only the first one will succeed, because the metatrace framework
// doesn't support multiple instances.
|
|
// This class is defined here (instead of directly in src/probes/) so it can
|
|
// be reused by other components (e.g. heapprofd).
|
|
class MetatraceWriter {
|
|
public:
|
|
static constexpr char kDataSourceName[] = "perfetto.metatrace";
|
|
|
|
MetatraceWriter();
|
|
~MetatraceWriter();
|
|
|
|
MetatraceWriter(const MetatraceWriter&) = delete;
|
|
MetatraceWriter& operator=(const MetatraceWriter&) = delete;
|
|
MetatraceWriter(MetatraceWriter&&) = delete;
|
|
MetatraceWriter& operator=(MetatraceWriter&&) = delete;
|
|
|
|
void Enable(base::TaskRunner*, std::unique_ptr<TraceWriter>, uint32_t tags);
|
|
void Disable();
|
|
void WriteAllAndFlushTraceWriter(std::function<void()> callback);
|
|
|
|
private:
|
|
void WriteAllAvailableEvents();
|
|
|
|
bool started_ = false;
|
|
base::TaskRunner* task_runner_ = nullptr;
|
|
std::unique_ptr<TraceWriter> trace_writer_;
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
base::WeakPtrFactory<MetatraceWriter> weak_ptr_factory_; // Keep last.
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_METATRACE_WRITER_H_
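// Illustrative usage sketch for MetatraceWriter (a minimal example;
// |task_runner| and |writer| are placeholders for objects obtained elsewhere,
// and the tag mask below is hypothetical):
//
//   perfetto::MetatraceWriter metatrace_writer;
//   uint32_t tags = 0xffffffff;  // hypothetical "enable all tags" mask
//   metatrace_writer.Enable(task_runner, std::move(writer), tags);
//   // ... later, on the same thread:
//   metatrace_writer.WriteAllAndFlushTraceWriter([] {});
//   metatrace_writer.Disable();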
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/perfetto/perfetto_metatrace.pbzero.h
|
|
// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
|
|
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_PERFETTO_METATRACE_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_PERFETTO_METATRACE_PROTO_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace pbzero {
|
|
|
|
class PerfettoMetatrace_Arg;
|
|
|
|
class PerfettoMetatrace_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/9, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
|
|
public:
|
|
PerfettoMetatrace_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit PerfettoMetatrace_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit PerfettoMetatrace_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_event_id() const { return at<1>().valid(); }
|
|
uint32_t event_id() const { return at<1>().as_uint32(); }
|
|
bool has_counter_id() const { return at<2>().valid(); }
|
|
uint32_t counter_id() const { return at<2>().as_uint32(); }
|
|
bool has_event_name() const { return at<8>().valid(); }
|
|
::protozero::ConstChars event_name() const { return at<8>().as_string(); }
|
|
bool has_counter_name() const { return at<9>().valid(); }
|
|
::protozero::ConstChars counter_name() const { return at<9>().as_string(); }
|
|
bool has_event_duration_ns() const { return at<3>().valid(); }
|
|
uint32_t event_duration_ns() const { return at<3>().as_uint32(); }
|
|
bool has_counter_value() const { return at<4>().valid(); }
|
|
int32_t counter_value() const { return at<4>().as_int32(); }
|
|
bool has_thread_id() const { return at<5>().valid(); }
|
|
uint32_t thread_id() const { return at<5>().as_uint32(); }
|
|
bool has_has_overruns() const { return at<6>().valid(); }
|
|
bool has_overruns() const { return at<6>().as_bool(); }
|
|
bool has_args() const { return at<7>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstBytes> args() const { return GetRepeated<::protozero::ConstBytes>(7); }
|
|
};
|
|
|
|
class PerfettoMetatrace : public ::protozero::Message {
|
|
public:
|
|
using Decoder = PerfettoMetatrace_Decoder;
|
|
enum : int32_t {
|
|
kEventIdFieldNumber = 1,
|
|
kCounterIdFieldNumber = 2,
|
|
kEventNameFieldNumber = 8,
|
|
kCounterNameFieldNumber = 9,
|
|
kEventDurationNsFieldNumber = 3,
|
|
kCounterValueFieldNumber = 4,
|
|
kThreadIdFieldNumber = 5,
|
|
kHasOverrunsFieldNumber = 6,
|
|
kArgsFieldNumber = 7,
|
|
};
|
|
using Arg = ::perfetto::protos::pbzero::PerfettoMetatrace_Arg;
|
|
void set_event_id(uint32_t value) {
|
|
AppendVarInt(1, value);
|
|
}
|
|
void set_counter_id(uint32_t value) {
|
|
AppendVarInt(2, value);
|
|
}
|
|
void set_event_name(const std::string& value) {
|
|
AppendBytes(8, value.data(), value.size());
|
|
}
|
|
void set_event_name(const char* data, size_t size) {
|
|
AppendBytes(8, data, size);
|
|
}
|
|
void set_counter_name(const std::string& value) {
|
|
AppendBytes(9, value.data(), value.size());
|
|
}
|
|
void set_counter_name(const char* data, size_t size) {
|
|
AppendBytes(9, data, size);
|
|
}
|
|
void set_event_duration_ns(uint32_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
void set_counter_value(int32_t value) {
|
|
AppendVarInt(4, value);
|
|
}
|
|
void set_thread_id(uint32_t value) {
|
|
AppendVarInt(5, value);
|
|
}
|
|
void set_has_overruns(bool value) {
|
|
AppendTinyVarInt(6, value);
|
|
}
|
|
template <typename T = PerfettoMetatrace_Arg> T* add_args() {
|
|
return BeginNestedMessage<T>(7);
|
|
}
|
|
|
|
};
|
|
|
|
class PerfettoMetatrace_Arg_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
PerfettoMetatrace_Arg_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit PerfettoMetatrace_Arg_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit PerfettoMetatrace_Arg_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_key() const { return at<1>().valid(); }
|
|
::protozero::ConstChars key() const { return at<1>().as_string(); }
|
|
bool has_value() const { return at<2>().valid(); }
|
|
::protozero::ConstChars value() const { return at<2>().as_string(); }
|
|
};
|
|
|
|
class PerfettoMetatrace_Arg : public ::protozero::Message {
|
|
public:
|
|
using Decoder = PerfettoMetatrace_Arg_Decoder;
|
|
enum : int32_t {
|
|
kKeyFieldNumber = 1,
|
|
kValueFieldNumber = 2,
|
|
};
|
|
void set_key(const std::string& value) {
|
|
AppendBytes(1, value.data(), value.size());
|
|
}
|
|
void set_key(const char* data, size_t size) {
|
|
AppendBytes(1, data, size);
|
|
}
|
|
void set_value(const std::string& value) {
|
|
AppendBytes(2, value.data(), value.size());
|
|
}
|
|
void set_value(const char* data, size_t size) {
|
|
AppendBytes(2, data, size);
|
|
}
|
|
};
|
|
|
|
} // Namespace.
|
|
} // Namespace.
|
|
} // Namespace.
|
|
#endif // Include guard.
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/metatrace_writer.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/perfetto/perfetto_metatrace.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// static
|
|
constexpr char MetatraceWriter::kDataSourceName[];
|
|
|
|
MetatraceWriter::MetatraceWriter() : weak_ptr_factory_(this) {}
|
|
|
|
MetatraceWriter::~MetatraceWriter() {
|
|
Disable();
|
|
}
|
|
|
|
void MetatraceWriter::Enable(base::TaskRunner* task_runner,
|
|
std::unique_ptr<TraceWriter> trace_writer,
|
|
uint32_t tags) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (started_) {
|
|
PERFETTO_DFATAL_OR_ELOG("Metatrace already started from this instance");
|
|
return;
|
|
}
|
|
task_runner_ = task_runner;
|
|
trace_writer_ = std::move(trace_writer);
|
|
auto weak_ptr = weak_ptr_factory_.GetWeakPtr();
|
|
bool enabled = metatrace::Enable(
|
|
[weak_ptr] {
|
|
if (weak_ptr)
|
|
weak_ptr->WriteAllAvailableEvents();
|
|
},
|
|
task_runner, tags);
|
|
if (!enabled)
|
|
return;
|
|
started_ = true;
|
|
}
|
|
|
|
void MetatraceWriter::Disable() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!started_)
|
|
return;
|
|
metatrace::Disable();
|
|
started_ = false;
|
|
trace_writer_.reset();
|
|
}
|
|
|
|
void MetatraceWriter::WriteAllAvailableEvents() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!started_)
|
|
return;
|
|
for (auto it = metatrace::RingBuffer::GetReadIterator(); it; ++it) {
|
|
auto type_and_id = it->type_and_id.load(std::memory_order_acquire);
|
|
if (type_and_id == 0)
|
|
break; // Stop at the first incomplete event.
|
|
|
|
auto packet = trace_writer_->NewTracePacket();
|
|
packet->set_timestamp(it->timestamp_ns());
|
|
auto* evt = packet->set_perfetto_metatrace();
|
|
uint16_t type = type_and_id & metatrace::Record::kTypeMask;
|
|
uint16_t id = type_and_id & ~metatrace::Record::kTypeMask;
|
|
if (type == metatrace::Record::kTypeCounter) {
|
|
evt->set_counter_id(id);
|
|
evt->set_counter_value(it->counter_value);
|
|
} else {
|
|
evt->set_event_id(id);
|
|
evt->set_event_duration_ns(it->duration_ns);
|
|
}
|
|
|
|
evt->set_thread_id(static_cast<uint32_t>(it->thread_id));
|
|
|
|
if (metatrace::RingBuffer::has_overruns())
|
|
evt->set_has_overruns(true);
|
|
}
|
|
// The |it| destructor will automatically update the read index position in
|
|
// the meta-trace ring buffer.
|
|
}
|
|
|
|
void MetatraceWriter::WriteAllAndFlushTraceWriter(
|
|
std::function<void()> callback) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!started_)
|
|
return;
|
|
WriteAllAvailableEvents();
|
|
trace_writer_->Flush(std::move(callback));
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/packet_stream_validator.cc
|
|
// gen_amalgamated begin header: src/tracing/core/packet_stream_validator.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_PACKET_STREAM_VALIDATOR_H_
|
|
#define SRC_TRACING_CORE_PACKET_STREAM_VALIDATOR_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Checks that the stream of trace packets sent by the producer is well formed.
|
|
// This includes:
|
|
//
|
|
// - The packets are not truncated.
// - There are no dangling bytes left over in the packets.
// - Trusted fields (e.g., uid) are not set by the producer.
|
|
//
|
|
// Note that we only validate top-level fields in the trace proto; sub-messages
|
|
// are simply skipped.
|
|
class PacketStreamValidator {
|
|
public:
|
|
PacketStreamValidator() = delete;
|
|
|
|
static bool Validate(const Slices&);
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_PACKET_STREAM_VALIDATOR_H_
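// Illustrative usage sketch for the validator declared above (a minimal
// example; |slices| stands for the Slices received from a producer):
//
//   if (!perfetto::PacketStreamValidator::Validate(slices)) {
//     // Reject the packet: it is truncated, has trailing garbage, or tries
//     // to set a trusted/reserved field such as trusted_uid.
//   }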
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/packet_stream_validator.h"
|
|
|
|
#include <inttypes.h>
|
|
#include <stddef.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
|
|
using protozero::proto_utils::ProtoWireType;
|
|
|
|
const uint32_t kReservedFieldIds[] = {
|
|
protos::pbzero::TracePacket::kTrustedUidFieldNumber,
|
|
protos::pbzero::TracePacket::kTrustedPacketSequenceIdFieldNumber,
|
|
protos::pbzero::TracePacket::kTraceConfigFieldNumber,
|
|
protos::pbzero::TracePacket::kTraceStatsFieldNumber,
|
|
protos::pbzero::TracePacket::kCompressedPacketsFieldNumber,
|
|
protos::pbzero::TracePacket::kSynchronizationMarkerFieldNumber,
|
|
};
|
|
|
|
// This translation unit is quite subtle and perf-sensitive. Remember to check
|
|
// BM_PacketStreamValidator in perfetto_benchmarks when making changes.
|
|
|
|
// Checks that a packet, spread over several slices, is well-formed and doesn't
|
|
// contain reserved top-level fields.
|
|
// The checking logic is based on a state-machine that skips the fields' payload
|
|
// and operates as follows:
|
|
// +-------------------------------+ <-------------------------+
|
|
// +----------> | Read field preamble (varint) | <----------------------+ |
|
|
// | +-------------------------------+ | |
|
|
// | | | | | |
|
|
// | <Varint> <Fixed 32/64> <Length-delimited field> | |
|
|
// | V | V | |
|
|
// | +------------------+ | +--------------+ | |
|
|
// | | Read field value | | | Read length | | |
|
|
// | | (another varint) | | | (varint) | | |
|
|
// | +------------------+ | +--------------+ | |
|
|
// | | V V | |
|
|
// +-----------+ +----------------+ +-----------------+ | |
|
|
// | Skip 4/8 Bytes | | Skip $len Bytes |-------+ |
|
|
// +----------------+ +-----------------+ |
|
|
// | |
|
|
// +------------------------------------------+
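// Worked example (comment only): feeding the two bytes {0x0A, 0x03} to Push()
// below. 0x0A is a field preamble for field id 1 with wire type 2
// (length-delimited), so the first call returns 0 and the FSM moves to the
// "Read length" state; 0x03 is the length varint, so the second call returns 3
// and the caller skips the next 3 payload bytes before the next preamble.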
|
|
class ProtoFieldParserFSM {
|
|
public:
|
|
// This method effectively continuously parses varints (either for the field
|
|
// preamble or the payload or the submessage length) and tells the caller
|
|
// (the Validate() method) how many bytes to skip until the next field.
|
|
size_t Push(uint8_t octet) {
|
|
varint_ |= static_cast<uint64_t>(octet & 0x7F) << varint_shift_;
|
|
if (octet & 0x80) {
|
|
varint_shift_ += 7;
|
|
if (varint_shift_ >= 64)
|
|
state_ = kInvalidVarInt;
|
|
return 0;
|
|
}
|
|
uint64_t varint = varint_;
|
|
varint_ = 0;
|
|
varint_shift_ = 0;
|
|
|
|
switch (state_) {
|
|
case kFieldPreamble: {
|
|
uint64_t field_type = varint & 7; // 7 = 0..0111
|
|
auto field_id = static_cast<uint32_t>(varint >> 3);
|
|
// Check if the field id is reserved, go into an error state if it is.
|
|
for (size_t i = 0; i < base::ArraySize(kReservedFieldIds); ++i) {
|
|
if (field_id == kReservedFieldIds[i]) {
|
|
state_ = kWroteReservedField;
|
|
return 0;
|
|
}
|
|
}
|
|
// The field id is legit; now check that the field type is well formed and within
|
|
// boundaries.
|
|
if (field_type == static_cast<uint64_t>(ProtoWireType::kVarInt)) {
|
|
state_ = kVarIntValue;
|
|
} else if (field_type ==
|
|
static_cast<uint64_t>(ProtoWireType::kFixed32)) {
|
|
return 4;
|
|
} else if (field_type ==
|
|
static_cast<uint64_t>(ProtoWireType::kFixed64)) {
|
|
return 8;
|
|
} else if (field_type ==
|
|
static_cast<uint64_t>(ProtoWireType::kLengthDelimited)) {
|
|
state_ = kLenDelimitedLen;
|
|
} else {
|
|
state_ = kUnknownFieldType;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
case kVarIntValue: {
|
|
// Consume the int field payload and go back to the next field.
|
|
state_ = kFieldPreamble;
|
|
return 0;
|
|
}
|
|
|
|
case kLenDelimitedLen: {
|
|
if (varint > protozero::proto_utils::kMaxMessageLength) {
|
|
state_ = kMessageTooBig;
|
|
return 0;
|
|
}
|
|
state_ = kFieldPreamble;
|
|
return static_cast<size_t>(varint);
|
|
}
|
|
|
|
case kWroteReservedField:
|
|
case kUnknownFieldType:
|
|
case kMessageTooBig:
|
|
case kInvalidVarInt:
|
|
// Persistent error states.
|
|
return 0;
|
|
|
|
} // switch(state_)
|
|
return 0; // To keep GCC happy.
|
|
}
|
|
|
|
// Queried at the end of the whole payload. A message is well-formed only
|
|
// if the FSM is back to the state where it should parse the next field and
|
|
// hasn't started parsing any preamble.
|
|
bool valid() const { return state_ == kFieldPreamble && varint_shift_ == 0; }
|
|
int state() const { return static_cast<int>(state_); }
|
|
|
|
private:
|
|
enum State {
|
|
kFieldPreamble = 0, // Parsing the varint for the field preamble.
|
|
kVarIntValue, // Parsing the varint value for the field payload.
|
|
kLenDelimitedLen, // Parsing the length of the length-delimited field.
|
|
|
|
// Error states:
|
|
kWroteReservedField, // Tried to set a reserved field id.
|
|
kUnknownFieldType, // Encountered an invalid field type.
|
|
kMessageTooBig, // Size of the length delimited message was too big.
|
|
kInvalidVarInt, // VarInt larger than 64 bits.
|
|
};
|
|
|
|
State state_ = kFieldPreamble;
|
|
uint64_t varint_ = 0;
|
|
uint32_t varint_shift_ = 0;
|
|
};
|
|
|
|
} // namespace
|
|
|
|
// static
|
|
bool PacketStreamValidator::Validate(const Slices& slices) {
|
|
ProtoFieldParserFSM parser;
|
|
size_t skip_bytes = 0;
|
|
for (const Slice& slice : slices) {
|
|
for (size_t i = 0; i < slice.size;) {
|
|
const size_t skip_bytes_cur_slice = std::min(skip_bytes, slice.size - i);
|
|
if (skip_bytes_cur_slice > 0) {
|
|
i += skip_bytes_cur_slice;
|
|
skip_bytes -= skip_bytes_cur_slice;
|
|
} else {
|
|
uint8_t octet = *(reinterpret_cast<const uint8_t*>(slice.start) + i);
|
|
skip_bytes = parser.Push(octet);
|
|
i++;
|
|
}
|
|
}
|
|
}
|
|
if (skip_bytes == 0 && parser.valid())
|
|
return true;
|
|
|
|
PERFETTO_DLOG("Packet validation error (state %d, skip = %zu)",
|
|
parser.state(), skip_bytes);
|
|
return false;
|
|
}
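// Worked example (comment only): a packet split across two slices,
// {0x0A, 0x02} followed by a slice with two arbitrary payload bytes, validates
// successfully: the first slice leaves skip_bytes == 2, the second slice is
// fully consumed by the skip, and the parser ends back in the field-preamble
// state with skip_bytes == 0, so Validate() returns true.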
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/trace_buffer.cc
|
|
// gen_amalgamated begin header: src/tracing/core/trace_buffer.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_TRACE_BUFFER_H_
|
|
#define SRC_TRACING_CORE_TRACE_BUFFER_H_
|
|
|
|
#include <stdint.h>
|
|
#include <string.h>
|
|
|
|
#include <array>
|
|
#include <limits>
|
|
#include <map>
|
|
#include <tuple>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_annotations.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class TracePacket;
|
|
|
|
// The main buffer, owned by the tracing service, where all the trace data is
|
|
// ultimately stored into. The service will own several instances of this class,
|
|
// at least one per active consumer (as defined in the |buffers| section of
|
|
// trace_config.proto) and will copy chunks from the producer's shared memory
|
|
// buffers into here when a CommitData IPC is received.
|
|
//
|
|
// Writing into the buffer
|
|
// -----------------------
|
|
// Data is copied from the SMB(s) using CopyChunkUntrusted(). The buffer will
|
|
// hence contain data coming from different producers and different writer
|
|
// sequences, more specifically:
|
|
// - The service receives data from several producer(s), identified by their ID.
|
|
// - Each producer writes several sequences identified by the same WriterID.
|
|
// (they correspond to TraceWriter instances in the producer).
|
|
// - Each Writer writes, in order, several chunks.
|
|
// - Each chunk contains zero, one, or more TracePacket(s), or even just
|
|
// fragments of packets (when they span across several chunks).
|
|
//
|
|
// So at any point in time, the buffer will contain a variable number of logical
|
|
// sequences identified by the {ProducerID, WriterID} tuple. Any given chunk
|
|
// will only contain packets (or fragments) belonging to the same sequence.
|
|
//
|
|
// The buffer operates by default as a ring buffer.
|
|
// It has two overwrite policies:
|
|
// 1. kOverwrite (default): if the write pointer reaches the read pointer, old
|
|
// unread chunks will be overwritten by new chunks.
|
|
// 2. kDiscard: if the write pointer reaches the read pointer, unread chunks
|
|
// are preserved and the new chunks are discarded. Any future write becomes
|
|
// a no-op, even if the reader manages to fully catch up. This is because
|
|
// once a chunk is discarded, the sequence of packets is broken and trying
|
|
// to recover would be too hard (also due to the fact that, at the same
|
|
// time, we allow out-of-order commits and chunk re-writes).
|
|
//
|
|
// Chunks are (over)written in the same order as the CopyChunkUntrusted() calls.
|
|
// When overwriting old content, entire chunks are overwritten or clobbered.
|
|
// The buffer never leaves a partial chunk around. Chunks' payload is copied
|
|
// as-is, but their header is not and is repacked in order to keep the
|
|
// ProducerID around.
|
|
//
|
|
// Chunks are stored in the buffer next to each other. Each chunk is prefixed by
|
|
// an inline header (ChunkRecord), which contains most of the fields of the
|
|
// SharedMemoryABI ChunkHeader + the ProducerID + the size of the payload.
|
|
// It's a conventional binary object stream essentially, where each ChunkRecord
|
|
// tells where it ends and hence where to find the next one, like this:
|
|
//
|
|
// .-------------------------. 16 byte boundary
|
|
// | ChunkRecord: 16 bytes |
|
|
// | - chunk id: 4 bytes |
|
|
// | - producer id: 2 bytes |
|
|
// | - writer id: 2 bytes |
|
|
// | - #fragments: 2 bytes |
|
|
// +-----+ - record size: 2 bytes |
|
|
// | | - flags+pad: 4 bytes |
|
|
// | +-------------------------+
|
|
// | | |
|
|
// | : Chunk payload :
|
|
// | | |
|
|
// | +-------------------------+
|
|
// | | Optional padding |
|
|
// +---> +-------------------------+ 16 byte boundary
|
|
// | ChunkRecord |
|
|
// : :
|
|
// Chunks stored in the buffer are always rounded up to 16 bytes (that is
|
|
// sizeof(ChunkRecord)), in order to avoid further inner fragmentation.
|
|
// Special "padding" chunks can be put in the buffer, e.g. in the case when we
|
|
// try to write a chunk of size N while the write pointer is at the end of the
|
|
// buffer, but the write pointer is < N bytes from the end (and hence needs to
|
|
// wrap over).
|
|
// Because of this, the buffer is self-describing: the contents of the buffer
|
|
// can be reconstructed by just looking at the buffer content (this will be
|
|
// quite useful in the future to recover the buffer from crash reports).
|
|
//
|
|
// However, in order to keep some operations (patching and reading) fast, a
|
|
// lookaside index is maintained (in |index_|), keeping each chunk in the buffer
|
|
// indexed by their {ProducerID, WriterID, ChunkID} tuple.
|
|
//
|
|
// Patching data out-of-band
|
|
// -------------------------
|
|
// This buffer also supports patching chunks' payload out-of-band, after they
|
|
// have been stored. This is to allow producers to backfill the "size" fields
|
|
// of the protos that span across several chunks, when the previous chunks are
|
|
// returned to the service. The MaybePatchChunkContents() deals with the fact
|
|
// that a chunk might have been lost (because of wrapping) by the time the OOB
|
|
// IPC comes.
|
|
//
|
|
// Reading from the buffer
|
|
// -----------------------
|
|
// This class supports one reader only (the consumer). Reads are NOT idempotent
|
|
// as they move the read cursors around. Reading back the buffer is the most
|
|
// conceptually complex part. The ReadNextTracePacket() method operates with
|
|
// whole packet granularity. Packets are returned only when all their fragments
|
|
// are available.
|
|
// This class takes care of:
|
|
// - Gluing packets within the same sequence, even if they are not stored
|
|
// adjacently in the buffer.
|
|
// - Re-ordering chunks within a sequence (using the ChunkID, which wraps).
|
|
// - Detecting holes in packet fragments (because of loss of chunks).
|
|
// Reads guarantee that packets for the same sequence are read in FIFO order
|
|
// (according to their ChunkID), but don't give any guarantee about the read
|
|
// order of packets from different sequences, see comments in
|
|
// ReadNextTracePacket() below.
|
|
class TraceBuffer {
|
|
public:
|
|
static const size_t InlineChunkHeaderSize; // For test/fake_packet.{cc,h}.
|
|
|
|
// See comment in the header above.
|
|
enum OverwritePolicy { kOverwrite, kDiscard };
|
|
|
|
// Argument for out-of-band patches applied through TryPatchChunkContents().
|
|
struct Patch {
|
|
// From SharedMemoryABI::kPacketHeaderSize.
|
|
static constexpr size_t kSize = 4;
|
|
|
|
size_t offset_untrusted;
|
|
std::array<uint8_t, kSize> data;
|
|
};
|
|
|
|
// Identifiers that are constant for a packet sequence.
|
|
struct PacketSequenceProperties {
|
|
ProducerID producer_id_trusted;
|
|
uid_t producer_uid_trusted;
|
|
WriterID writer_id;
|
|
};
|
|
|
|
// Can return nullptr if the memory allocation fails.
|
|
static std::unique_ptr<TraceBuffer> Create(size_t size_in_bytes,
|
|
OverwritePolicy = kOverwrite);
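// Illustrative sketch (comment only): creating a 4 MB buffer with the
// kDiscard policy and handling a possible allocation failure:
//
//   std::unique_ptr<TraceBuffer> buf =
//       TraceBuffer::Create(4 * 1024 * 1024, TraceBuffer::kDiscard);
//   if (!buf)
//     PERFETTO_ELOG("TraceBuffer allocation failed");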
|
|
|
|
~TraceBuffer();
|
|
|
|
// Copies a Chunk from a producer Shared Memory Buffer into the trace buffer.
|
|
// |src| points to the first packet in the SharedMemoryABI's chunk shared with
|
|
// an untrusted producer. "untrusted" here means: the producer might be
|
|
// malicious and might change |src| concurrently while we read it (internally
|
|
// this method memcpy()-s first the chunk before processing it). None of the
|
|
// arguments should be trusted, unless otherwise stated. We can trust that
|
|
// |src| points to a valid memory area, but not its contents.
|
|
//
|
|
// This method may be called multiple times for the same chunk. In this case,
|
|
// the original chunk's payload will be overridden and its number of fragments
|
|
// and flags adjusted to match |num_fragments| and |chunk_flags|. The service
|
|
// may use this to insert partial chunks (|chunk_complete = false|) before the
|
|
// producer has committed them.
|
|
//
|
|
// If |chunk_complete| is |false|, the TraceBuffer will only consider the
|
|
// first |num_fragments - 1| packets to be complete, since the producer may
|
|
// not have finished writing the latest packet. Reading from a sequence will
|
|
// also not progress past any incomplete chunks until they are rewritten with
|
|
// |chunk_complete = true|, e.g. after a producer's commit.
|
|
//
|
|
// TODO(eseckler): Pass in a PacketStreamProperties instead of individual IDs.
|
|
void CopyChunkUntrusted(ProducerID producer_id_trusted,
|
|
uid_t producer_uid_trusted,
|
|
WriterID writer_id,
|
|
ChunkID chunk_id,
|
|
uint16_t num_fragments,
|
|
uint8_t chunk_flags,
|
|
bool chunk_complete,
|
|
const uint8_t* src,
|
|
size_t size);
|
|
// Applies a batch of |patches| to the given chunk, if the given chunk is
|
|
// still in the buffer. Does nothing if the given ChunkID is gone.
|
|
// Returns true if the chunk has been found and patched, false otherwise.
|
|
// |other_patches_pending| is used to determine whether this is the only
|
|
// batch of patches for the chunk or there is more.
|
|
// If |other_patches_pending| == false, the chunk is marked as ready to be
|
|
// consumed. If true, the state of the chunk is not altered.
|
|
bool TryPatchChunkContents(ProducerID,
|
|
WriterID,
|
|
ChunkID,
|
|
const Patch* patches,
|
|
size_t patches_size,
|
|
bool other_patches_pending);
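// Illustrative sketch (comment only): applying a single out-of-band patch,
// assuming |offset_from_producer| and the 4 patch bytes arrived via IPC
// (both names are hypothetical):
//
//   TraceBuffer::Patch patch;
//   patch.offset_untrusted = offset_from_producer;
//   patch.data = {{0x2a, 0x00, 0x00, 0x00}};
//   buf->TryPatchChunkContents(producer_id, writer_id, chunk_id, &patch, 1,
//                              /*other_patches_pending=*/false);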
|
|
|
|
// To read the contents of the buffer the caller needs to:
|
|
// BeginRead()
|
|
// while (ReadNextTracePacket(packet_fragments)) { ... }
|
|
// No other calls to any other method should be interleaved between
|
|
// BeginRead() and ReadNextTracePacket().
|
|
// Reads in the TraceBuffer are NOT idempotent.
|
|
void BeginRead();
|
|
|
|
// Returns the next packet in the buffer, if any, and the producer_id,
|
|
// producer_uid, and writer_id of the producer/writer that wrote it (as passed
|
|
// in the CopyChunkUntrusted() call). Returns false if no packets can be read
|
|
// at this point. If a packet was read successfully,
|
|
// |previous_packet_on_sequence_dropped| is set to |true| if the previous
|
|
// packet on the sequence was dropped from the buffer before it could be read
|
|
// (e.g. because its chunk was overridden due to the ring buffer wrapping or
|
|
// due to an ABI violation), and to |false| otherwise.
|
|
//
|
|
// This function returns only complete packets. Specifically:
|
|
// When there is at least one complete packet in the buffer, this function
|
|
// returns true and populates the TracePacket argument with the boundaries of
|
|
// each fragment for one packet.
|
|
// TracePacket will have at least one slice when this function returns true.
|
|
// When there are no whole packets eligible to read (e.g. we are still missing
|
|
// fragments) this function returns false.
|
|
// This function guarantees also that packets for a given
|
|
// {ProducerID, WriterID} are read in FIFO order.
|
|
// This function does not guarantee any ordering w.r.t. packets belonging to
|
|
// different WriterID(s). For instance, given the following packets copied
|
|
// into the buffer:
|
|
// {ProducerID: 1, WriterID: 1}: P1 P2 P3
|
|
// {ProducerID: 1, WriterID: 2}: P4 P5 P6
|
|
// {ProducerID: 2, WriterID: 1}: P7 P8 P9
|
|
// The following read sequence is possible:
|
|
// P1, P4, P7, P2, P3, P5, P8, P9, P6
|
|
// But the following is guaranteed to NOT happen:
|
|
// P1, P5, P7, P4 (P4 cannot come after P5)
|
|
bool ReadNextTracePacket(TracePacket*,
|
|
PacketSequenceProperties* sequence_properties,
|
|
bool* previous_packet_on_sequence_dropped);
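// Illustrative sketch (comment only) of the read protocol described above,
// spelled out with the full signature:
//
//   buf->BeginRead();
//   for (;;) {
//     TracePacket packet;
//     TraceBuffer::PacketSequenceProperties props{};
//     bool prev_dropped = false;
//     if (!buf->ReadNextTracePacket(&packet, &props, &prev_dropped))
//       break;
//     // Consume |packet|; |props| identifies the {producer, writer} sequence.
//   }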
|
|
|
|
const TraceStats::BufferStats& stats() const { return stats_; }
|
|
size_t size() const { return size_; }
|
|
|
|
private:
|
|
friend class TraceBufferTest;
|
|
|
|
// ChunkRecord is a Chunk header stored inline in the |data_| buffer, before
|
|
// the chunk payload (the packets' data). The |data_| buffer looks like this:
|
|
// +---------------+------------------++---------------+-----------------+
|
|
// | ChunkRecord 1 | Chunk payload 1 || ChunkRecord 2 | Chunk payload 2 | ...
|
|
// +---------------+------------------++---------------+-----------------+
|
|
// Most of the ChunkRecord fields are copied from SharedMemoryABI::ChunkHeader
|
|
// (the chunk header used in the shared memory buffers).
|
|
// A ChunkRecord can be a special "padding" record. In this case its payload
|
|
// should be ignored and the record should be just skipped.
|
|
//
|
|
// Full page move optimization:
|
|
// This struct has to be exactly (sizeof(PageHeader) + sizeof(ChunkHeader))
|
|
// (from shared_memory_abi.h) to allow full page move optimizations
|
|
// (TODO(primiano): not implemented yet). In the special case of moving a full
|
|
// 4k page that contains only one chunk, in fact, we can just ask the kernel
|
|
// to move the full SHM page (see SPLICE_F_{GIFT,MOVE}) and overlay the
|
|
// ChunkRecord on top of the moved SMB's header (page + chunk header).
|
|
// This special requirement is covered by static_assert(s) in the .cc file.
|
|
struct ChunkRecord {
|
|
explicit ChunkRecord(size_t sz) : flags{0}, is_padding{0} {
|
|
PERFETTO_DCHECK(sz >= sizeof(ChunkRecord) &&
|
|
sz % sizeof(ChunkRecord) == 0 && sz <= kMaxSize);
|
|
size = static_cast<decltype(size)>(sz);
|
|
}
|
|
|
|
bool is_valid() const { return size != 0; }
|
|
|
|
// Keep this structure packed and exactly 16 bytes (128 bits) big.
|
|
|
|
// [32 bits] Monotonic counter within the same writer_id.
|
|
ChunkID chunk_id = 0;
|
|
|
|
// [16 bits] ID of the Producer from which the Chunk was copied from.
|
|
ProducerID producer_id = 0;
|
|
|
|
// [16 bits] Unique per Producer (but not within the service).
|
|
// If writer_id == kWriterIdPadding the record should just be skipped.
|
|
WriterID writer_id = 0;
|
|
|
|
// Number of fragments contained in the chunk.
|
|
uint16_t num_fragments = 0;
|
|
|
|
// Size in bytes, including sizeof(ChunkRecord) itself.
|
|
uint16_t size;
|
|
|
|
uint8_t flags : 6; // See SharedMemoryABI::ChunkHeader::flags.
|
|
uint8_t is_padding : 1;
|
|
uint8_t unused_flag : 1;
|
|
|
|
// Not strictly needed, can be reused for more fields in the future. But
|
|
// right now helps to spot chunks in hex dumps.
|
|
char unused[3] = {'C', 'H', 'U'};
|
|
|
|
static constexpr size_t kMaxSize =
|
|
std::numeric_limits<decltype(size)>::max();
|
|
};
|
|
|
|
// Lookaside index entry. This serves three purposes:
|
|
// 1) Allow a fast lookup of ChunkRecord by their ID (the tuple
|
|
// {ProducerID, WriterID, ChunkID}). This is used when applying out-of-band
|
|
// patches to the contents of the chunks after they have been copied into
|
|
// the TraceBuffer.
|
|
// 2) Keep the chunks ordered by their ID. This is used when reading back.
|
|
// 3) Keep metadata about the status of the chunk, e.g. whether the contents
|
|
// have been read already and should be skipped in a future read pass.
|
|
// This struct should not have any field that is essential for reconstructing
|
|
// the contents of the buffer from a crash dump.
|
|
struct ChunkMeta {
|
|
// Key used for sorting in the map.
|
|
struct Key {
|
|
Key(ProducerID p, WriterID w, ChunkID c)
|
|
: producer_id{p}, writer_id{w}, chunk_id{c} {}
|
|
|
|
explicit Key(const ChunkRecord& cr)
|
|
: Key(cr.producer_id, cr.writer_id, cr.chunk_id) {}
|
|
|
|
// Note that this sorting doesn't take into account the fact that ChunkID
|
|
// will wrap over at some point. The extra logic in SequenceIterator deals
|
|
// with that.
|
|
bool operator<(const Key& other) const {
|
|
return std::tie(producer_id, writer_id, chunk_id) <
|
|
std::tie(other.producer_id, other.writer_id, other.chunk_id);
|
|
}
|
|
|
|
bool operator==(const Key& other) const {
|
|
return std::tie(producer_id, writer_id, chunk_id) ==
|
|
std::tie(other.producer_id, other.writer_id, other.chunk_id);
|
|
}
|
|
|
|
bool operator!=(const Key& other) const { return !(*this == other); }
|
|
|
|
// These fields should match at all times the corresponding fields in
|
|
// the |chunk_record|. They are copied here purely for efficiency to avoid
|
|
// dereferencing the buffer all the time.
|
|
ProducerID producer_id;
|
|
WriterID writer_id;
|
|
ChunkID chunk_id;
|
|
};
|
|
|
|
enum IndexFlags : uint8_t {
|
|
// If set, the chunk state was kChunkComplete at the time it was copied.
|
|
// If unset, the chunk was still kChunkBeingWritten while copied. When
|
|
// reading from the chunk's sequence, the sequence will not advance past
|
|
// this chunk until this flag is set.
|
|
kComplete = 1 << 0,
|
|
|
|
// If set, we skipped the last packet that we read from this chunk e.g.
|
|
// because it was a continuation from a previous chunk that was dropped
|
|
// or due to an ABI violation.
|
|
kLastReadPacketSkipped = 1 << 1
|
|
};
|
|
|
|
ChunkMeta(ChunkRecord* r, uint16_t p, bool complete, uint8_t f, uid_t u)
|
|
: chunk_record{r}, trusted_uid{u}, flags{f}, num_fragments{p} {
|
|
if (complete)
|
|
index_flags = kComplete;
|
|
}
|
|
|
|
bool is_complete() const { return index_flags & kComplete; }
|
|
|
|
void set_complete(bool complete) {
|
|
if (complete) {
|
|
index_flags |= kComplete;
|
|
} else {
|
|
index_flags &= ~kComplete;
|
|
}
|
|
}
|
|
|
|
bool last_read_packet_skipped() const {
|
|
return index_flags & kLastReadPacketSkipped;
|
|
}
|
|
|
|
void set_last_read_packet_skipped(bool skipped) {
|
|
if (skipped) {
|
|
index_flags |= kLastReadPacketSkipped;
|
|
} else {
|
|
index_flags &= ~kLastReadPacketSkipped;
|
|
}
|
|
}
|
|
|
|
ChunkRecord* const chunk_record; // Addr of ChunkRecord within |data_|.
|
|
const uid_t trusted_uid; // uid of the producer.
|
|
|
|
// Flags set by TraceBuffer to track the state of the chunk in the index.
|
|
uint8_t index_flags = 0;
|
|
|
|
// Correspond to |chunk_record->flags| and |chunk_record->num_fragments|.
|
|
// Copied here for performance reasons (avoids having to dereference
|
|
// |chunk_record| while iterating over ChunkMeta) and to aid debugging in
|
|
// case the buffer gets corrupted.
|
|
uint8_t flags = 0; // See SharedMemoryABI::ChunkHeader::flags.
|
|
uint16_t num_fragments = 0; // Total number of packet fragments.
|
|
|
|
uint16_t num_fragments_read = 0; // Number of fragments already read.
|
|
|
|
// The start offset of the next fragment (the |num_fragments_read|-th) to be
|
|
// read. This is the offset in bytes from the beginning of the ChunkRecord's
|
|
// payload (the 1st fragment starts at |chunk_record| +
|
|
// sizeof(ChunkRecord)).
|
|
uint16_t cur_fragment_offset = 0;
|
|
};
|
|
|
|
using ChunkMap = std::map<ChunkMeta::Key, ChunkMeta>;
|
|
|
|
// Allows iterating over a sub-sequence of |index_| for all keys belonging to
|
|
// the same {ProducerID,WriterID}. Furthermore takes into account the wrapping
|
|
// of ChunkID. Instances are valid only as long as the |index_| is not altered
|
|
// (can be used safely only between adjacent ReadNextTracePacket() calls).
|
|
// The order of the iteration will proceed in the following order:
|
|
// |wrapping_id| + 1 -> |seq_end|, |seq_begin| -> |wrapping_id|.
|
|
// Practical example:
|
|
// - Assume that kMaxChunkID == 7
|
|
// - Assume that we have all 8 chunks in the range (0..7).
|
|
// - Hence, |seq_begin| == c0, |seq_end| == c7
|
|
// - Assume |wrapping_id| = 4 (c4 is the last chunk copied over
|
|
// through a CopyChunkUntrusted()).
|
|
// The resulting iteration order will be: c5, c6, c7, c0, c1, c2, c3, c4.
|
|
struct SequenceIterator {
|
|
// Points to the 1st key (the one with the numerically min ChunkID).
|
|
ChunkMap::iterator seq_begin;
|
|
|
|
// Points one past the last key (the one with the numerically max ChunkID).
|
|
ChunkMap::iterator seq_end;
|
|
|
|
// Current iterator, always >= seq_begin && <= seq_end.
|
|
ChunkMap::iterator cur;
|
|
|
|
// The latest ChunkID written. Determines the start/end of the sequence.
|
|
ChunkID wrapping_id;
|
|
|
|
bool is_valid() const { return cur != seq_end; }
|
|
|
|
ProducerID producer_id() const {
|
|
PERFETTO_DCHECK(is_valid());
|
|
return cur->first.producer_id;
|
|
}
|
|
|
|
WriterID writer_id() const {
|
|
PERFETTO_DCHECK(is_valid());
|
|
return cur->first.writer_id;
|
|
}
|
|
|
|
ChunkID chunk_id() const {
|
|
PERFETTO_DCHECK(is_valid());
|
|
return cur->first.chunk_id;
|
|
}
|
|
|
|
ChunkMeta& operator*() {
|
|
PERFETTO_DCHECK(is_valid());
|
|
return cur->second;
|
|
}
|
|
|
|
// Moves |cur| to the next chunk in the index.
|
|
// is_valid() will become false after calling this, if this was the last
|
|
// entry of the sequence.
|
|
void MoveNext();
|
|
|
|
void MoveToEnd() { cur = seq_end; }
|
|
};
|
|
|
|
enum class ReadAheadResult {
|
|
kSucceededReturnSlices,
|
|
kFailedMoveToNextSequence,
|
|
kFailedStayOnSameSequence,
|
|
};
|
|
|
|
enum class ReadPacketResult {
|
|
kSucceeded,
|
|
kFailedInvalidPacket,
|
|
kFailedEmptyPacket,
|
|
};
|
|
|
|
explicit TraceBuffer(OverwritePolicy);
|
|
TraceBuffer(const TraceBuffer&) = delete;
|
|
TraceBuffer& operator=(const TraceBuffer&) = delete;
|
|
|
|
bool Initialize(size_t size);
|
|
|
|
// Returns an object that allows to iterate over chunks in the |index_| that
|
|
// have the same {ProducerID, WriterID} of
|
|
// |seq_begin.first.{producer,writer}_id|. |seq_begin| must be an iterator to
|
|
// the first entry in the |index_| that has a different {ProducerID, WriterID}
|
|
// from the previous one. It is valid for |seq_begin| to be == index_.end()
|
|
// (i.e. if the index is empty). The iteration takes care of ChunkID wrapping,
|
|
// by using |last_chunk_id_|.
|
|
SequenceIterator GetReadIterForSequence(ChunkMap::iterator seq_begin);
|
|
|
|
// Used as a last resort when a buffer corruption is detected.
|
|
void ClearContentsAndResetRWCursors();
|
|
|
|
// Adds a padding record of the given size (must be a multiple of
|
|
// sizeof(ChunkRecord)).
|
|
void AddPaddingRecord(size_t);
|
|
|
|
// Looks for contiguous fragments of the same packet starting from |read_iter_|.
|
|
// If a contiguous packet is found, all the fragments are pushed into
|
|
// TracePacket and the function returns kSucceededReturnSlices. If not, the
|
|
// function returns either kFailedMoveToNextSequence or
|
|
// kFailedStayOnSameSequence, telling the caller to continue looking for
|
|
// packets.
|
|
ReadAheadResult ReadAhead(TracePacket*);
|
|
|
|
// Deletes (by marking the record invalid and removing from the index) all
|
|
// chunks from |wptr_| to |wptr_| + |bytes_to_clear|.
|
|
// Returns:
|
|
// * The size of the gap left between the next valid Chunk and the end of
|
|
// the deletion range.
|
|
// * 0 if no next valid chunk exists (if the buffer is still zeroed).
|
|
// * -1 if the buffer's |overwrite_policy_| == kDiscard and the deletion would
|
|
// cause unread chunks to be overwritten. In this case the buffer is left
|
|
// untouched.
|
|
// Graphically, assume the initial situation is the following (|wptr_| = 10).
|
|
// |0 |10 (wptr_) |30 |40 |60
|
|
// +---------+-----------------+---------+-------------------+---------+
|
|
// | Chunk 1 | Chunk 2 | Chunk 3 | Chunk 4 | Chunk 5 |
|
|
// +---------+-----------------+---------+-------------------+---------+
|
|
// |_________Deletion range_______|~~return value~~|
|
|
//
|
|
// A call to DeleteNextChunksFor(32) will remove chunks 2,3,4 and return 18
|
|
// (60 - 42), the distance between chunk 5 and the end of the deletion range.
|
|
ssize_t DeleteNextChunksFor(size_t bytes_to_clear);
|
|
|
|
// Decodes the boundaries of the next packet (or a fragment) pointed by
|
|
// ChunkMeta and pushes that into |TracePacket|. It also increments the
|
|
// |num_fragments_read| counter.
|
|
// TracePacket can be nullptr, in which case the read state is still advanced.
|
|
// When TracePacket is not nullptr, ProducerID must also be not null and will
|
|
// be updated with the ProducerID that originally wrote the chunk.
|
|
ReadPacketResult ReadNextPacketInChunk(ChunkMeta*, TracePacket*);
|
|
|
|
void DcheckIsAlignedAndWithinBounds(const uint8_t* ptr) const {
|
|
PERFETTO_DCHECK(ptr >= begin() && ptr <= end() - sizeof(ChunkRecord));
|
|
PERFETTO_DCHECK(
|
|
(reinterpret_cast<uintptr_t>(ptr) & (alignof(ChunkRecord) - 1)) == 0);
|
|
}
|
|
|
|
ChunkRecord* GetChunkRecordAt(uint8_t* ptr) {
|
|
DcheckIsAlignedAndWithinBounds(ptr);
|
|
// We may be accessing a new (empty) record.
|
|
data_.EnsureCommitted(
|
|
static_cast<size_t>(ptr + sizeof(ChunkRecord) - begin()));
|
|
return reinterpret_cast<ChunkRecord*>(ptr);
|
|
}
|
|
|
|
void DiscardWrite();
|
|
|
|
// |src| can be nullptr (in which case |size| must be ==
|
|
// record.size - sizeof(ChunkRecord)), for the case of writing a padding
|
|
// record. |wptr_| is NOT advanced by this function, the caller must do that.
|
|
void WriteChunkRecord(uint8_t* wptr,
|
|
const ChunkRecord& record,
|
|
const uint8_t* src,
|
|
size_t size) {
|
|
// Note: |record.size| will be slightly bigger than |size| because of the
|
|
// ChunkRecord header and rounding, to ensure that all ChunkRecord(s) are
|
|
// multiple of sizeof(ChunkRecord). The invariant is:
|
|
// record.size >= |size| + sizeof(ChunkRecord) (== if no rounding).
|
|
PERFETTO_DCHECK(size <= ChunkRecord::kMaxSize);
|
|
PERFETTO_DCHECK(record.size >= sizeof(record));
|
|
PERFETTO_DCHECK(record.size % sizeof(record) == 0);
|
|
PERFETTO_DCHECK(record.size >= size + sizeof(record));
|
|
PERFETTO_CHECK(record.size <= size_to_end());
|
|
DcheckIsAlignedAndWithinBounds(wptr);
|
|
|
|
// We may be writing to this area for the first time.
|
|
data_.EnsureCommitted(static_cast<size_t>(wptr + record.size - begin()));
|
|
|
|
// Deliberately not a *D*CHECK.
|
|
PERFETTO_CHECK(wptr + sizeof(record) + size <= end());
|
|
memcpy(wptr, &record, sizeof(record));
|
|
if (PERFETTO_LIKELY(src)) {
|
|
// If the producer modifies the data in the shared memory buffer while we
|
|
// are copying it to the central buffer, TSAN will (rightfully) flag that
|
|
// as a race. However the entire purpose of copying the data into the
|
|
// central buffer is that we can validate it without worrying that the
|
|
// producer changes it from under our feet, so this race is benign. The
|
|
// alternative would be to try computing which part of the buffer is safe
|
|
// to read (assuming a well-behaving client), but the risk of introducing
|
|
// a bug that way outweighs the benefit.
|
|
PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(
|
|
src, size, "Benign race when copying chunk from shared memory.")
|
|
memcpy(wptr + sizeof(record), src, size);
|
|
} else {
|
|
PERFETTO_DCHECK(size == record.size - sizeof(record));
|
|
}
|
|
const size_t rounding_size = record.size - sizeof(record) - size;
|
|
memset(wptr + sizeof(record) + size, 0, rounding_size);
|
|
}
|
|
|
|
uint8_t* begin() const { return reinterpret_cast<uint8_t*>(data_.Get()); }
|
|
uint8_t* end() const { return begin() + size_; }
|
|
size_t size_to_end() const { return static_cast<size_t>(end() - wptr_); }
|
|
|
|
base::PagedMemory data_;
|
|
size_t size_ = 0; // Size in bytes of |data_|.
|
|
size_t max_chunk_size_ = 0; // Max size in bytes allowed for a chunk.
|
|
uint8_t* wptr_ = nullptr; // Write pointer.
|
|
|
|
// An index that keeps track of the positions and metadata of each
|
|
// ChunkRecord.
|
|
ChunkMap index_;
|
|
|
|
// Read iterator used for ReadNext(). It is reset by calling BeginRead().
|
|
// It becomes invalid after any call to methods that alters the |index_|.
|
|
SequenceIterator read_iter_;
|
|
|
|
// See comments at the top of the file.
|
|
OverwritePolicy overwrite_policy_ = kOverwrite;
|
|
|
|
// Only used when |overwrite_policy_ == kDiscard|. This is set the first time
|
|
// a write fails because it would overwrite unread chunks.
|
|
bool discard_writes_ = false;
|
|
|
|
// Keeps track of the highest ChunkID written for a given sequence, taking
|
|
// into account a potential overflow of ChunkIDs. In the case of overflow,
|
|
// stores the highest ChunkID written since the overflow.
|
|
//
|
|
// TODO(primiano): should clean up keys from this map. Right now it grows
|
|
// without bounds (although realistically it is not a problem unless we have too
|
|
// many producers/writers within the same trace session).
|
|
std::map<std::pair<ProducerID, WriterID>, ChunkID> last_chunk_id_written_;
|
|
|
|
// Statistics about buffer usage.
|
|
TraceStats::BufferStats stats_;
|
|
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
bool changed_since_last_read_ = false;
|
|
#endif
|
|
|
|
// When true disable some DCHECKs that have been put in place to detect
|
|
// bugs in the producers. This is for tests that feed malicious inputs and
|
|
// hence mimic a buggy producer.
|
|
bool suppress_client_dchecks_for_testing_ = false;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_TRACE_BUFFER_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/trace_buffer.h"
|
|
|
|
#include <limits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
#define TRACE_BUFFER_VERBOSE_LOGGING() 0 // Set to 1 when debugging unittests.
|
|
#if TRACE_BUFFER_VERBOSE_LOGGING()
|
|
#define TRACE_BUFFER_DLOG PERFETTO_DLOG
|
|
namespace {
|
|
constexpr char kHexDigits[] = "0123456789abcdef";
|
|
std::string HexDump(const uint8_t* src, size_t size) {
|
|
std::string buf;
|
|
buf.reserve(4096 * 4);
|
|
char line[64];
|
|
char* c = line;
|
|
for (size_t i = 0; i < size; i++) {
|
|
*c++ = kHexDigits[(src[i] >> 4) & 0x0f];
|
|
*c++ = kHexDigits[(src[i] >> 0) & 0x0f];
|
|
if (i % 16 == 15) {
|
|
buf.append("\n");
|
|
buf.append(line);
|
|
c = line;
|
|
}
|
|
}
|
|
return buf;
|
|
}
|
|
} // namespace
|
|
#else
|
|
#define TRACE_BUFFER_DLOG(...) void()
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
constexpr uint8_t kFirstPacketContinuesFromPrevChunk =
|
|
SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk;
|
|
constexpr uint8_t kLastPacketContinuesOnNextChunk =
|
|
SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk;
|
|
constexpr uint8_t kChunkNeedsPatching =
|
|
SharedMemoryABI::ChunkHeader::kChunkNeedsPatching;
|
|
} // namespace.
|
|
|
|
constexpr size_t TraceBuffer::ChunkRecord::kMaxSize;
|
|
constexpr size_t TraceBuffer::InlineChunkHeaderSize = sizeof(ChunkRecord);
|
|
|
|
// static
|
|
std::unique_ptr<TraceBuffer> TraceBuffer::Create(size_t size_in_bytes,
|
|
OverwritePolicy pol) {
|
|
std::unique_ptr<TraceBuffer> trace_buffer(new TraceBuffer(pol));
|
|
if (!trace_buffer->Initialize(size_in_bytes))
|
|
return nullptr;
|
|
return trace_buffer;
|
|
}
|
|
|
|
TraceBuffer::TraceBuffer(OverwritePolicy pol) : overwrite_policy_(pol) {
|
|
// See comments in ChunkRecord for the rationale of this.
|
|
static_assert(sizeof(ChunkRecord) == sizeof(SharedMemoryABI::PageHeader) +
|
|
sizeof(SharedMemoryABI::ChunkHeader),
|
|
"ChunkRecord out of sync with the layout of SharedMemoryABI");
|
|
}
|
|
|
|
TraceBuffer::~TraceBuffer() = default;
|
|
|
|
bool TraceBuffer::Initialize(size_t size) {
|
|
static_assert(
|
|
SharedMemoryABI::kMinPageSize % sizeof(ChunkRecord) == 0,
|
|
"sizeof(ChunkRecord) must be an integer divider of a page size");
|
|
data_ = base::PagedMemory::Allocate(
|
|
size, base::PagedMemory::kMayFail | base::PagedMemory::kDontCommit);
|
|
if (!data_.IsValid()) {
|
|
PERFETTO_ELOG("Trace buffer allocation failed (size: %zu)", size);
|
|
return false;
|
|
}
|
|
size_ = size;
|
|
stats_.set_buffer_size(size);
|
|
max_chunk_size_ = std::min(size, ChunkRecord::kMaxSize);
|
|
wptr_ = begin();
|
|
index_.clear();
|
|
last_chunk_id_written_.clear();
|
|
read_iter_ = GetReadIterForSequence(index_.end());
|
|
return true;
|
|
}
|
|
|
|
// Note: |src| points to a shmem region that is shared with the producer. Assume
|
|
// that the producer is malicious and will change the content of |src|
|
|
// while we execute here. Don't do any processing on it other than memcpy().
|
|
void TraceBuffer::CopyChunkUntrusted(ProducerID producer_id_trusted,
|
|
uid_t producer_uid_trusted,
|
|
WriterID writer_id,
|
|
ChunkID chunk_id,
|
|
uint16_t num_fragments,
|
|
uint8_t chunk_flags,
|
|
bool chunk_complete,
|
|
const uint8_t* src,
|
|
size_t size) {
|
|
// |record_size| = |size| + sizeof(ChunkRecord), rounded up to avoid ending
|
|
// up in a fragmented state where size_to_end() < sizeof(ChunkRecord).
|
|
const size_t record_size =
|
|
base::AlignUp<sizeof(ChunkRecord)>(size + sizeof(ChunkRecord));
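// Worked example: with the 16-byte ChunkRecord, a 25-byte chunk payload gives
// record_size == AlignUp<16>(25 + 16) == 48, i.e. the 41 required bytes
// rounded up to the next multiple of sizeof(ChunkRecord).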
|
|
if (PERFETTO_UNLIKELY(record_size > max_chunk_size_)) {
|
|
stats_.set_abi_violations(stats_.abi_violations() + 1);
|
|
PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
|
|
return;
|
|
}
|
|
|
|
TRACE_BUFFER_DLOG("CopyChunk @ %lu, size=%zu", wptr_ - begin(), record_size);
|
|
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
changed_since_last_read_ = true;
|
|
#endif
|
|
|
|
// If the chunk hasn't been completed, we should only consider the first
|
|
// |num_fragments - 1| packets complete. For simplicity, we simply disregard
|
|
// the last one when we copy the chunk.
|
|
if (PERFETTO_UNLIKELY(!chunk_complete)) {
|
|
if (num_fragments > 0) {
|
|
num_fragments--;
|
|
// These flags should only affect the last packet in the chunk. We clear
|
|
// them, so that TraceBuffer is able to look at the remaining packets in
|
|
// this chunk.
|
|
chunk_flags &= ~kLastPacketContinuesOnNextChunk;
|
|
chunk_flags &= ~kChunkNeedsPatching;
|
|
}
|
|
}
|
|
|
|
ChunkRecord record(record_size);
|
|
record.producer_id = producer_id_trusted;
|
|
record.chunk_id = chunk_id;
|
|
record.writer_id = writer_id;
|
|
record.num_fragments = num_fragments;
|
|
record.flags = chunk_flags;
|
|
ChunkMeta::Key key(record);
|
|
|
|
// Check whether we have already copied the same chunk previously. This may
|
|
// happen if the service scrapes chunks in a potentially incomplete state
|
|
// before receiving commit requests for them from the producer. Note that the
|
|
// service may scrape and thus override chunks in arbitrary order since the
|
|
// chunks aren't ordered in the SMB.
|
|
const auto it = index_.find(key);
|
|
if (PERFETTO_UNLIKELY(it != index_.end())) {
|
|
ChunkMeta* record_meta = &it->second;
|
|
ChunkRecord* prev = record_meta->chunk_record;
|
|
|
|
// Verify that the old chunk's metadata corresponds to the new one.
|
|
// Overridden chunks should never change size, since the page layout is
|
|
// fixed per writer. The number of fragments should also never decrease and
|
|
// flags should not be removed.
|
|
if (PERFETTO_UNLIKELY(ChunkMeta::Key(*prev) != key ||
|
|
prev->size != record_size ||
|
|
prev->num_fragments > num_fragments ||
|
|
(prev->flags & chunk_flags) != prev->flags)) {
|
|
stats_.set_abi_violations(stats_.abi_violations() + 1);
|
|
PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
|
|
return;
|
|
}
|
|
|
|
// If we've already started reading from chunk N+1 following this chunk N,
|
|
// don't override chunk N. Otherwise we may end up reading a packet from
|
|
// chunk N after having read from chunk N+1, thereby violating sequential
|
|
// read of packets. This shouldn't happen if the producer is well-behaved,
|
|
// because it shouldn't start chunk N+1 before completing chunk N.
|
|
ChunkMeta::Key subsequent_key = key;
|
|
static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
|
|
"ChunkID wraps");
|
|
subsequent_key.chunk_id++;
|
|
const auto subsequent_it = index_.find(subsequent_key);
|
|
if (subsequent_it != index_.end() &&
|
|
subsequent_it->second.num_fragments_read > 0) {
|
|
stats_.set_abi_violations(stats_.abi_violations() + 1);
|
|
PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
|
|
return;
|
|
}
|
|
|
|
// If this chunk was previously copied and its number of fragments didn't
|
|
// change, there's no need to copy it again. If the
|
|
// previous chunk was complete already, this should always be the case.
|
|
PERFETTO_DCHECK(suppress_client_dchecks_for_testing_ ||
|
|
!record_meta->is_complete() ||
|
|
(chunk_complete && prev->num_fragments == num_fragments));
|
|
if (prev->num_fragments == num_fragments) {
|
|
TRACE_BUFFER_DLOG(" skipping recommit of identical chunk");
|
|
return;
|
|
}
|
|
|
|
// We should not have read past the last packet.
|
|
if (record_meta->num_fragments_read > prev->num_fragments) {
|
|
PERFETTO_ELOG(
|
|
"TraceBuffer read too many fragments from an incomplete chunk");
|
|
PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
|
|
return;
|
|
}
|
|
|
|
uint8_t* wptr = reinterpret_cast<uint8_t*>(prev);
|
|
TRACE_BUFFER_DLOG(" overriding chunk @ %lu, size=%zu", wptr - begin(),
|
|
record_size);
|
|
|
|
// Update chunk meta data stored in the index, as it may have changed.
|
|
record_meta->num_fragments = num_fragments;
|
|
record_meta->flags = chunk_flags;
|
|
record_meta->set_complete(chunk_complete);
|
|
|
|
// Override the ChunkRecord contents at the original |wptr|.
|
|
TRACE_BUFFER_DLOG(" copying @ [%lu - %lu] %zu", wptr - begin(),
|
|
uintptr_t(wptr - begin()) + record_size, record_size);
|
|
WriteChunkRecord(wptr, record, src, size);
|
|
TRACE_BUFFER_DLOG("Chunk raw: %s", HexDump(wptr, record_size).c_str());
|
|
stats_.set_chunks_rewritten(stats_.chunks_rewritten() + 1);
|
|
return;
|
|
}
|
|
|
|
if (PERFETTO_UNLIKELY(discard_writes_))
|
|
return DiscardWrite();
|
|
|
|
// If there isn't enough room from the given write position, write a padding
|
|
// record to clear the end of the buffer and wrap back.
|
|
const size_t cached_size_to_end = size_to_end();
|
|
if (PERFETTO_UNLIKELY(record_size > cached_size_to_end)) {
|
|
ssize_t res = DeleteNextChunksFor(cached_size_to_end);
|
|
if (res == -1)
|
|
return DiscardWrite();
|
|
PERFETTO_DCHECK(static_cast<size_t>(res) <= cached_size_to_end);
|
|
AddPaddingRecord(cached_size_to_end);
|
|
wptr_ = begin();
|
|
stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
|
|
PERFETTO_DCHECK(size_to_end() >= record_size);
|
|
}
|
|
|
|
// At this point either |wptr_| points to an untouched part of the buffer
|
|
// (i.e. *wptr_ == 0) or we are about to overwrite one or more ChunkRecord(s).
|
|
// In the latter case we need to first figure out where the next valid
|
|
// ChunkRecord is (if it exists) and add padding between the new record and it.
|
|
// Example ((w) == write cursor):
|
|
//
|
|
// Initial state (wptr_ == 0):
|
|
// |0 (w) |10 |30 |50
|
|
// +---------+-----------------+--------------------+--------------------+
|
|
// | Chunk 1 | Chunk 2 | Chunk 3 | Chunk 4 |
|
|
// +---------+-----------------+--------------------+--------------------+
|
|
//
|
|
// Let's assume we now want to write a 5th Chunk of size == 35. The final
|
|
// state should look like this:
|
|
// |0 |35 (w) |50
|
|
// +---------------------------------+---------------+--------------------+
|
|
// | Chunk 5 | Padding Chunk | Chunk 4 |
|
|
// +---------------------------------+---------------+--------------------+
|
|
|
|
// Deletes all chunks from |wptr_| to |wptr_| + |record_size|.
|
|
ssize_t del_res = DeleteNextChunksFor(record_size);
|
|
if (del_res == -1)
|
|
return DiscardWrite();
|
|
size_t padding_size = static_cast<size_t>(del_res);
|
|
|
|
// Now first insert the new chunk. At the end, if necessary, add the padding.
|
|
stats_.set_chunks_written(stats_.chunks_written() + 1);
|
|
stats_.set_bytes_written(stats_.bytes_written() + record_size);
|
|
auto it_and_inserted = index_.emplace(
|
|
key, ChunkMeta(GetChunkRecordAt(wptr_), num_fragments, chunk_complete,
|
|
chunk_flags, producer_uid_trusted));
|
|
PERFETTO_DCHECK(it_and_inserted.second);
|
|
TRACE_BUFFER_DLOG(" copying @ [%lu - %lu] %zu", wptr_ - begin(),
|
|
uintptr_t(wptr_ - begin()) + record_size, record_size);
|
|
WriteChunkRecord(wptr_, record, src, size);
|
|
TRACE_BUFFER_DLOG("Chunk raw: %s", HexDump(wptr_, record_size).c_str());
|
|
wptr_ += record_size;
|
|
if (wptr_ >= end()) {
|
|
PERFETTO_DCHECK(padding_size == 0);
|
|
wptr_ = begin();
|
|
stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
|
|
}
|
|
DcheckIsAlignedAndWithinBounds(wptr_);
|
|
|
|
// Chunks may be received out of order, so only update last_chunk_id if the
|
|
// new chunk_id is larger. But take into account overflows by only selecting
|
|
// the new ID if its distance to the latest ID is smaller than half the number
|
|
// space.
|
|
//
|
|
// This accounts for both the case where the new ID has just overflown and
|
|
// last_chunk_id should be updated even though it's smaller (e.g. |chunk_id| = 1
|
|
// |last_chunk_id| = kMaxChunkId; chunk_id - last_chunk_id = 0) and the case
|
|
// where the new ID is an out-of-order ID right after an overflow and
|
|
// last_chunk_id shouldn't be updated even though it's larger (e.g. |chunk_id|
|
|
// = kMaxChunkId and |last_chunk_id| = 1; chunk_id - last_chunk_id =
|
|
// kMaxChunkId - 1).
|
|
auto producer_and_writer_id = std::make_pair(producer_id_trusted, writer_id);
|
|
ChunkID& last_chunk_id = last_chunk_id_written_[producer_and_writer_id];
|
|
static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
|
|
"This code assumes that ChunkID wraps at kMaxChunkID");
|
|
if (chunk_id - last_chunk_id < kMaxChunkID / 2) {
|
|
last_chunk_id = chunk_id;
|
|
} else {
|
|
stats_.set_chunks_committed_out_of_order(
|
|
stats_.chunks_committed_out_of_order() + 1);
|
|
}
|
|
|
|
if (padding_size)
|
|
AddPaddingRecord(padding_size);
|
|
}
|
|
|
|
ssize_t TraceBuffer::DeleteNextChunksFor(size_t bytes_to_clear) {
|
|
PERFETTO_CHECK(!discard_writes_);
|
|
|
|
// Find the position of the first chunk which begins at or after
|
|
// (|wptr_| + |bytes_to_clear|). Note that such a chunk might not exist and we
|
|
// either reach the end of the buffer or a zeroed region of the buffer.
|
|
uint8_t* next_chunk_ptr = wptr_;
|
|
uint8_t* search_end = wptr_ + bytes_to_clear;
|
|
TRACE_BUFFER_DLOG("Delete [%zu %zu]", wptr_ - begin(), search_end - begin());
|
|
DcheckIsAlignedAndWithinBounds(wptr_);
|
|
PERFETTO_DCHECK(search_end <= end());
|
|
std::vector<ChunkMap::iterator> index_delete;
|
|
uint64_t chunks_overwritten = stats_.chunks_overwritten();
|
|
uint64_t bytes_overwritten = stats_.bytes_overwritten();
|
|
uint64_t padding_bytes_cleared = stats_.padding_bytes_cleared();
|
|
while (next_chunk_ptr < search_end) {
|
|
const ChunkRecord& next_chunk = *GetChunkRecordAt(next_chunk_ptr);
|
|
TRACE_BUFFER_DLOG(
|
|
" scanning chunk [%zu %zu] (valid=%d)", next_chunk_ptr - begin(),
|
|
next_chunk_ptr - begin() + next_chunk.size, next_chunk.is_valid());
|
|
|
|
// We just reached the untouched part of the buffer; it's going to be all
|
|
// zeroes from here to end().
|
|
// Optimization: if during Initialize() we fill the buffer with padding
|
|
// records we could get rid of this branch.
|
|
if (PERFETTO_UNLIKELY(!next_chunk.is_valid())) {
|
|
// This should happen only at the first iteration. The zeroed area can
|
|
// only begin precisely at the |wptr_|, not after. Otherwise it means that
|
|
// we wrapped but screwed up the ChunkRecord chain.
|
|
PERFETTO_DCHECK(next_chunk_ptr == wptr_);
|
|
return 0;
|
|
}
|
|
|
|
// Remove |next_chunk| from the index, unless it's a padding record (padding
|
|
// records are not part of the index).
|
|
if (PERFETTO_LIKELY(!next_chunk.is_padding)) {
|
|
ChunkMeta::Key key(next_chunk);
|
|
auto it = index_.find(key);
|
|
bool will_remove = false;
|
|
if (PERFETTO_LIKELY(it != index_.end())) {
|
|
const ChunkMeta& meta = it->second;
|
|
if (PERFETTO_UNLIKELY(meta.num_fragments_read < meta.num_fragments)) {
|
|
if (overwrite_policy_ == kDiscard)
|
|
return -1;
|
|
chunks_overwritten++;
|
|
bytes_overwritten += next_chunk.size;
|
|
}
|
|
index_delete.push_back(it);
|
|
will_remove = true;
|
|
}
|
|
TRACE_BUFFER_DLOG(
|
|
" del index {%" PRIu32 ",%" PRIu32 ",%u} @ [%lu - %lu] %d",
|
|
key.producer_id, key.writer_id, key.chunk_id,
|
|
next_chunk_ptr - begin(), next_chunk_ptr - begin() + next_chunk.size,
|
|
will_remove);
|
|
PERFETTO_DCHECK(will_remove);
|
|
} else {
|
|
padding_bytes_cleared += next_chunk.size;
|
|
}
|
|
|
|
next_chunk_ptr += next_chunk.size;
|
|
|
|
// We should never hit this, unless we managed to screw up while writing
|
|
// to the buffer and breaking the ChunkRecord(s) chain.
|
|
// TODO(primiano): Write more meaningful logging with the status of the
|
|
// buffer, to get more actionable bugs in case we hit this.
|
|
PERFETTO_CHECK(next_chunk_ptr <= end());
|
|
}
|
|
|
|
// Remove from the index.
|
|
for (auto it : index_delete) {
|
|
index_.erase(it);
|
|
}
|
|
stats_.set_chunks_overwritten(chunks_overwritten);
|
|
stats_.set_bytes_overwritten(bytes_overwritten);
|
|
stats_.set_padding_bytes_cleared(padding_bytes_cleared);
|
|
|
|
PERFETTO_DCHECK(next_chunk_ptr >= search_end && next_chunk_ptr <= end());
|
|
return static_cast<ssize_t>(next_chunk_ptr - search_end);
|
|
}
|
|
|
|
void TraceBuffer::AddPaddingRecord(size_t size) {
|
|
PERFETTO_DCHECK(size >= sizeof(ChunkRecord) && size <= ChunkRecord::kMaxSize);
|
|
ChunkRecord record(size);
|
|
record.is_padding = 1;
|
|
TRACE_BUFFER_DLOG("AddPaddingRecord @ [%lu - %lu] %zu", wptr_ - begin(),
|
|
uintptr_t(wptr_ - begin()) + size, size);
|
|
WriteChunkRecord(wptr_, record, nullptr, size - sizeof(ChunkRecord));
|
|
stats_.set_padding_bytes_written(stats_.padding_bytes_written() + size);
|
|
// |wptr_| is deliberately not advanced when writing a padding record.
|
|
}
|
|
|
|
bool TraceBuffer::TryPatchChunkContents(ProducerID producer_id,
|
|
WriterID writer_id,
|
|
ChunkID chunk_id,
|
|
const Patch* patches,
|
|
size_t patches_size,
|
|
bool other_patches_pending) {
|
|
ChunkMeta::Key key(producer_id, writer_id, chunk_id);
|
|
auto it = index_.find(key);
|
|
if (it == index_.end()) {
|
|
stats_.set_patches_failed(stats_.patches_failed() + 1);
|
|
return false;
|
|
}
|
|
ChunkMeta& chunk_meta = it->second;
|
|
|
|
// Check that the index is consistent with the actual ProducerID/WriterID
|
|
// stored in the ChunkRecord.
|
|
PERFETTO_DCHECK(ChunkMeta::Key(*chunk_meta.chunk_record) == key);
|
|
uint8_t* chunk_begin = reinterpret_cast<uint8_t*>(chunk_meta.chunk_record);
|
|
PERFETTO_DCHECK(chunk_begin >= begin());
|
|
uint8_t* chunk_end = chunk_begin + chunk_meta.chunk_record->size;
|
|
PERFETTO_DCHECK(chunk_end <= end());
|
|
|
|
static_assert(Patch::kSize == SharedMemoryABI::kPacketHeaderSize,
|
|
"Patch::kSize out of sync with SharedMemoryABI");
|
|
|
|
for (size_t i = 0; i < patches_size; i++) {
|
|
uint8_t* ptr =
|
|
chunk_begin + sizeof(ChunkRecord) + patches[i].offset_untrusted;
|
|
TRACE_BUFFER_DLOG("PatchChunk {%" PRIu32 ",%" PRIu32
|
|
",%u} size=%zu @ %zu with {%02x %02x %02x %02x} cur "
|
|
"{%02x %02x %02x %02x}",
|
|
producer_id, writer_id, chunk_id, chunk_end - chunk_begin,
|
|
patches[i].offset_untrusted, patches[i].data[0],
|
|
patches[i].data[1], patches[i].data[2],
|
|
patches[i].data[3], ptr[0], ptr[1], ptr[2], ptr[3]);
|
|
if (ptr < chunk_begin + sizeof(ChunkRecord) ||
|
|
ptr > chunk_end - Patch::kSize) {
|
|
// Either the IPC was so slow that in the meantime the writer managed to
|
|
// wrap over |chunk_id| or the producer sent a malicious IPC.
|
|
stats_.set_patches_failed(stats_.patches_failed() + 1);
|
|
return false;
|
|
}
|
|
|
|
// DCHECK that we are writing into a zero-filled size field and not into
|
|
// valid data. It relies on ScatteredStreamWriter::ReserveBytes() to
|
|
// zero-fill reservations in debug builds.
|
|
char zero[Patch::kSize]{};
|
|
PERFETTO_DCHECK(memcmp(ptr, &zero, Patch::kSize) == 0);
|
|
|
|
memcpy(ptr, &patches[i].data[0], Patch::kSize);
|
|
}
|
|
TRACE_BUFFER_DLOG(
|
|
"Chunk raw (after patch): %s",
|
|
HexDump(chunk_begin, chunk_meta.chunk_record->size).c_str());
|
|
|
|
stats_.set_patches_succeeded(stats_.patches_succeeded() + patches_size);
|
|
if (!other_patches_pending) {
|
|
chunk_meta.flags &= ~kChunkNeedsPatching;
|
|
chunk_meta.chunk_record->flags = chunk_meta.flags;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
void TraceBuffer::BeginRead() {
|
|
read_iter_ = GetReadIterForSequence(index_.begin());
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
changed_since_last_read_ = false;
|
|
#endif
|
|
}
|
|
|
|
TraceBuffer::SequenceIterator TraceBuffer::GetReadIterForSequence(
|
|
ChunkMap::iterator seq_begin) {
|
|
SequenceIterator iter;
|
|
iter.seq_begin = seq_begin;
|
|
if (seq_begin == index_.end()) {
|
|
iter.cur = iter.seq_end = index_.end();
|
|
return iter;
|
|
}
|
|
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
// Either |seq_begin| is == index_.begin() or the item immediately before must
|
|
// belong to a different {ProducerID, WriterID} sequence.
|
|
if (seq_begin != index_.begin() && seq_begin != index_.end()) {
|
|
auto prev_it = seq_begin;
|
|
prev_it--;
|
|
PERFETTO_DCHECK(
|
|
seq_begin == index_.begin() ||
|
|
std::tie(prev_it->first.producer_id, prev_it->first.writer_id) <
|
|
std::tie(seq_begin->first.producer_id, seq_begin->first.writer_id));
|
|
}
|
|
#endif
|
|
|
|
// Find the first entry that has a greater {ProducerID, WriterID} (or just
|
|
// index_.end() if we reached the end).
|
|
ChunkMeta::Key key = seq_begin->first; // Deliberate copy.
|
|
key.chunk_id = kMaxChunkID;
|
|
iter.seq_end = index_.upper_bound(key);
|
|
PERFETTO_DCHECK(iter.seq_begin != iter.seq_end);
|
|
|
|
// Now find the first entry between [seq_begin, seq_end) that is
|
|
// > last_chunk_id_written_. This is where the sequence will start (see
|
|
// notes about wrapping of IDs in the header).
|
|
auto producer_and_writer_id = std::make_pair(key.producer_id, key.writer_id);
|
|
PERFETTO_DCHECK(last_chunk_id_written_.count(producer_and_writer_id));
|
|
iter.wrapping_id = last_chunk_id_written_[producer_and_writer_id];
|
|
key.chunk_id = iter.wrapping_id;
|
|
iter.cur = index_.upper_bound(key);
|
|
if (iter.cur == iter.seq_end)
|
|
iter.cur = iter.seq_begin;
|
|
return iter;
|
|
}
|
|
|
|
void TraceBuffer::SequenceIterator::MoveNext() {
|
|
// Stop iterating when we reach the end of the sequence.
|
|
// Note: |seq_begin| might be == |seq_end|.
|
|
if (cur == seq_end || cur->first.chunk_id == wrapping_id) {
|
|
cur = seq_end;
|
|
return;
|
|
}
|
|
|
|
// If the current chunk wasn't completed yet, we shouldn't advance past it as
|
|
// it may be rewritten with additional packets.
|
|
if (!cur->second.is_complete()) {
|
|
cur = seq_end;
|
|
return;
|
|
}
|
|
|
|
ChunkID last_chunk_id = cur->first.chunk_id;
|
|
if (++cur == seq_end)
|
|
cur = seq_begin;
|
|
|
|
// There may be a missing chunk in the sequence of chunks, in which case the
|
|
// next chunk's ID won't follow the last one's. If so, skip the rest of the
|
|
// sequence. We'll return to it later once the hole is filled.
|
|
if (last_chunk_id + 1 != cur->first.chunk_id)
|
|
cur = seq_end;
|
|
}
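// Worked example for MoveNext() above (illustrative): if a sequence currently
// holds ChunkIDs {5, 6, 9} and |cur| is on chunk 6, advancing lands on chunk
// 9; since 6 + 1 != 9, the iterator jumps to |seq_end| instead, and the rest
// of the sequence is skipped until the hole left by the missing chunks 7 and
// 8 is filled.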
|
|
|
|
bool TraceBuffer::ReadNextTracePacket(
|
|
TracePacket* packet,
|
|
PacketSequenceProperties* sequence_properties,
|
|
bool* previous_packet_on_sequence_dropped) {
|
|
// Note: MoveNext() moves only within the next chunk within the same
|
|
// {ProducerID, WriterID} sequence. Here we want to:
|
|
// - return the next patched+complete packet in the current sequence, if any.
|
|
// - return the first patched+complete packet in the next sequence, if any.
|
|
// - return false if none of the above is found.
|
|
TRACE_BUFFER_DLOG("ReadNextTracePacket()");
|
|
|
|
// Just in case we forget to initialize these below.
|
|
*sequence_properties = {0, kInvalidUid, 0};
|
|
*previous_packet_on_sequence_dropped = false;
|
|
|
|
// At the start of each sequence iteration, we consider the last read packet
|
|
// dropped. While iterating over the chunks in the sequence, we update this
|
|
// flag based on our knowledge about the last packet that was read from each
|
|
// chunk (|last_read_packet_skipped| in ChunkMeta).
|
|
bool previous_packet_dropped = true;
|
|
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
PERFETTO_DCHECK(!changed_since_last_read_);
|
|
#endif
|
|
for (;; read_iter_.MoveNext()) {
|
|
if (PERFETTO_UNLIKELY(!read_iter_.is_valid())) {
|
|
// We ran out of chunks in the current {ProducerID, WriterID} sequence or
|
|
// we just reached the index_.end().
|
|
|
|
if (PERFETTO_UNLIKELY(read_iter_.seq_end == index_.end()))
|
|
return false;
|
|
|
|
// We reached the end of sequence, move to the next one.
|
|
// Note: ++read_iter_.seq_end might become index_.end(), but
|
|
// GetReadIterForSequence() knows how to deal with that.
|
|
read_iter_ = GetReadIterForSequence(read_iter_.seq_end);
|
|
PERFETTO_DCHECK(read_iter_.is_valid() && read_iter_.cur != index_.end());
|
|
previous_packet_dropped = true;
|
|
}
|
|
|
|
ChunkMeta* chunk_meta = &*read_iter_;
|
|
|
|
// If the chunk has holes that are awaiting to be patched out-of-band,
|
|
// skip the current sequence and move to the next one.
|
|
if (chunk_meta->flags & kChunkNeedsPatching) {
|
|
read_iter_.MoveToEnd();
|
|
continue;
|
|
}
|
|
|
|
const ProducerID trusted_producer_id = read_iter_.producer_id();
|
|
const WriterID writer_id = read_iter_.writer_id();
|
|
const uid_t trusted_uid = chunk_meta->trusted_uid;
|
|
|
|
// At this point we have a chunk in |chunk_meta| that has not been fully
|
|
// read. We don't know yet whether we have enough data to read the full
|
|
// packet (in the case it's fragmented over several chunks) and we are about
|
|
// to find that out. Specifically:
|
|
// A) If the first fragment is unread and is a fragment continuing from a
|
|
// previous chunk, it means we have missed the previous ChunkID. In
|
|
// fact, if this wasn't the case, a previous call to ReadNext() shouldn't
|
|
// have moved the cursor to this chunk.
|
|
    // B) Any fragment > 0 && < last is always readable. By definition an inner
    //    packet is never fragmented and hence requires neither stitching
    //    nor any out-of-band patching. The same applies to the last packet
    //    iff it doesn't continue on the next chunk.
|
|
// C) If the last packet (which might be also the only packet in the chunk)
|
|
// is a fragment and continues on the next chunk, we peek at the next
|
|
// chunks and, if we have all of them, mark as read and move the cursor.
|
|
//
|
|
    //   +---------------+   +-------------------+   +---------------+
    //   | ChunkID: 1    |   | ChunkID: 2        |   | ChunkID: 3    |
    //   +---------------+   +-------------------+   +---------------+
    //   | Packet 1      |   |                   |   | ... Packet 3  |
    //   | Packet 2      |   | ... Packet 3 ...  |   | Packet 4      |
    //   | Packet 3 ...  |   |                   |   | Packet 5 ...  |
    //   +---------------+   +-------------------+   +---------------+
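    //
    // Worked example (illustrative), using the layout above: if the read
    // cursor is on ChunkID 1 and Packet 1 and Packet 2 have already been read,
    // the only fragment left is the first fragment of Packet 3, which
    // continues on the next chunk, so the action below is kTryReadAhead
    // (case C). ReadAhead() then checks that ChunkID 2 and ChunkID 3 are
    // present and contiguous and, if so, stitches the three fragments of
    // Packet 3 into a single TracePacket.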
|
|
|
|
PERFETTO_DCHECK(chunk_meta->num_fragments_read <=
|
|
chunk_meta->num_fragments);
|
|
|
|
// If we didn't read any packets from this chunk, the last packet was from
|
|
// the previous chunk we iterated over; so don't update
|
|
// |previous_packet_dropped| in this case.
|
|
if (chunk_meta->num_fragments_read > 0)
|
|
previous_packet_dropped = chunk_meta->last_read_packet_skipped();
|
|
|
|
while (chunk_meta->num_fragments_read < chunk_meta->num_fragments) {
|
|
enum { kSkip = 0, kReadOnePacket, kTryReadAhead } action;
|
|
if (chunk_meta->num_fragments_read == 0) {
|
|
if (chunk_meta->flags & kFirstPacketContinuesFromPrevChunk) {
|
|
action = kSkip; // Case A.
|
|
} else if (chunk_meta->num_fragments == 1 &&
|
|
(chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
|
|
action = kTryReadAhead; // Case C.
|
|
} else {
|
|
action = kReadOnePacket; // Case B.
|
|
}
|
|
} else if (chunk_meta->num_fragments_read <
|
|
chunk_meta->num_fragments - 1 ||
|
|
!(chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
|
|
action = kReadOnePacket; // Case B.
|
|
} else {
|
|
action = kTryReadAhead; // Case C.
|
|
}
|
|
|
|
TRACE_BUFFER_DLOG(" chunk %u, packet %hu of %hu, action=%d",
|
|
read_iter_.chunk_id(), chunk_meta->num_fragments_read,
|
|
chunk_meta->num_fragments, action);
|
|
|
|
if (action == kSkip) {
|
|
        // This fragment will be skipped forever, not just in this ReadPacket()
        // iteration. This happens by virtue of ReadNextPacketInChunk()
        // incrementing the |num_fragments_read| and marking the fragment as
        // read even though we didn't really read it.
|
|
ReadNextPacketInChunk(chunk_meta, nullptr);
|
|
chunk_meta->set_last_read_packet_skipped(true);
|
|
previous_packet_dropped = true;
|
|
continue;
|
|
}
|
|
|
|
if (action == kReadOnePacket) {
|
|
// The easy peasy case B.
|
|
ReadPacketResult result = ReadNextPacketInChunk(chunk_meta, packet);
|
|
|
|
if (PERFETTO_LIKELY(result == ReadPacketResult::kSucceeded)) {
|
|
*sequence_properties = {trusted_producer_id, trusted_uid, writer_id};
|
|
*previous_packet_on_sequence_dropped = previous_packet_dropped;
|
|
return true;
|
|
} else if (result == ReadPacketResult::kFailedEmptyPacket) {
|
|
// We can ignore and skip empty packets.
|
|
PERFETTO_DCHECK(packet->slices().empty());
|
|
continue;
|
|
}
|
|
|
|
// In extremely rare cases (producer bugged / malicious) the chunk might
|
|
// contain an invalid fragment. In such case we don't want to stall the
|
|
// sequence but just skip the chunk and move on. ReadNextPacketInChunk()
|
|
// marks the chunk as fully read, so we don't attempt to read from it
|
|
// again in a future call to ReadBuffers(). It also already records an
|
|
// abi violation for this.
|
|
PERFETTO_DCHECK(result == ReadPacketResult::kFailedInvalidPacket);
|
|
chunk_meta->set_last_read_packet_skipped(true);
|
|
previous_packet_dropped = true;
|
|
break;
|
|
}
|
|
|
|
PERFETTO_DCHECK(action == kTryReadAhead);
|
|
ReadAheadResult ra_res = ReadAhead(packet);
|
|
if (ra_res == ReadAheadResult::kSucceededReturnSlices) {
|
|
stats_.set_readaheads_succeeded(stats_.readaheads_succeeded() + 1);
|
|
*sequence_properties = {trusted_producer_id, trusted_uid, writer_id};
|
|
*previous_packet_on_sequence_dropped = previous_packet_dropped;
|
|
return true;
|
|
}
|
|
|
|
if (ra_res == ReadAheadResult::kFailedMoveToNextSequence) {
|
|
// readahead didn't find a contiguous packet sequence. We'll try again
|
|
// on the next ReadPacket() call.
|
|
stats_.set_readaheads_failed(stats_.readaheads_failed() + 1);
|
|
|
|
// TODO(primiano): optimization: this MoveToEnd() is the reason why
|
|
// MoveNext() (that is called in the outer for(;;MoveNext)) needs to
|
|
// deal gracefully with the case of |cur|==|seq_end|. Maybe we can do
|
|
// something to avoid that check by reshuffling the code here?
|
|
read_iter_.MoveToEnd();
|
|
|
|
// This break will go back to beginning of the for(;;MoveNext()). That
|
|
// will move to the next sequence because we set the read iterator to
|
|
// its end.
|
|
break;
|
|
}
|
|
|
|
PERFETTO_DCHECK(ra_res == ReadAheadResult::kFailedStayOnSameSequence);
|
|
|
|
// In this case ReadAhead() might advance |read_iter_|, so we need to
|
|
// re-cache the |chunk_meta| pointer to point to the current chunk.
|
|
chunk_meta = &*read_iter_;
|
|
chunk_meta->set_last_read_packet_skipped(true);
|
|
previous_packet_dropped = true;
|
|
} // while(...) [iterate over packet fragments for the current chunk].
|
|
} // for(;;MoveNext()) [iterate over chunks].
|
|
}
|
|
|
|
TraceBuffer::ReadAheadResult TraceBuffer::ReadAhead(TracePacket* packet) {
|
|
static_assert(static_cast<ChunkID>(kMaxChunkID + 1) == 0,
|
|
"relying on kMaxChunkID to wrap naturally");
|
|
TRACE_BUFFER_DLOG(" readahead start @ chunk %u", read_iter_.chunk_id());
|
|
ChunkID next_chunk_id = read_iter_.chunk_id() + 1;
|
|
SequenceIterator it = read_iter_;
|
|
for (it.MoveNext(); it.is_valid(); it.MoveNext(), next_chunk_id++) {
|
|
// We should stay within the same sequence while iterating here.
|
|
PERFETTO_DCHECK(it.producer_id() == read_iter_.producer_id() &&
|
|
it.writer_id() == read_iter_.writer_id());
|
|
|
|
TRACE_BUFFER_DLOG(" expected chunk ID: %u, actual ID: %u", next_chunk_id,
|
|
it.chunk_id());
|
|
|
|
if (PERFETTO_UNLIKELY((*it).num_fragments == 0))
|
|
continue;
|
|
|
|
// If we miss the next chunk, stop looking in the current sequence and
|
|
// try another sequence. This chunk might come in the near future.
|
|
// The second condition is the edge case of a buggy/malicious
|
|
// producer. The ChunkID is contiguous but its flags don't make sense.
|
|
if (it.chunk_id() != next_chunk_id ||
|
|
PERFETTO_UNLIKELY(
|
|
!((*it).flags & kFirstPacketContinuesFromPrevChunk))) {
|
|
return ReadAheadResult::kFailedMoveToNextSequence;
|
|
}
|
|
|
|
    // If the chunk is contiguous but has not been patched yet, move to the
    // next sequence and try coming back here on the next
    // ReadNextTracePacket() call.
|
|
// TODO(primiano): add a test to cover this, it's a subtle case.
|
|
if ((*it).flags & kChunkNeedsPatching)
|
|
return ReadAheadResult::kFailedMoveToNextSequence;
|
|
|
|
    // This is the case of an intermediate chunk which contains only one
    // fragment which continues on the next chunk. This happens for large
    // packets, e.g.: [Packet0, Packet1(0)] [Packet1(1)] [Packet1(2), ...]
|
|
// (Packet1(X) := fragment X of Packet1).
|
|
if ((*it).num_fragments == 1 &&
|
|
((*it).flags & kLastPacketContinuesOnNextChunk)) {
|
|
continue;
|
|
}
|
|
|
|
// We made it! We got all fragments for the packet without holes.
|
|
TRACE_BUFFER_DLOG(" readahead success @ chunk %u", it.chunk_id());
|
|
PERFETTO_DCHECK(((*it).num_fragments == 1 &&
|
|
!((*it).flags & kLastPacketContinuesOnNextChunk)) ||
|
|
(*it).num_fragments > 1);
|
|
|
|
// Now let's re-iterate over the [read_iter_, it] sequence and mark
|
|
// all the fragments as read.
|
|
bool packet_corruption = false;
|
|
for (;;) {
|
|
PERFETTO_DCHECK(read_iter_.is_valid());
|
|
TRACE_BUFFER_DLOG(" commit chunk %u", read_iter_.chunk_id());
|
|
if (PERFETTO_LIKELY((*read_iter_).num_fragments > 0)) {
|
|
        // In the unlikely case of a corrupted packet (corrupted or empty
        // fragment), invalidate all the stitching and move on to the next
        // chunk in the same sequence, if any.
|
|
packet_corruption |= ReadNextPacketInChunk(&*read_iter_, packet) ==
|
|
ReadPacketResult::kFailedInvalidPacket;
|
|
}
|
|
if (read_iter_.cur == it.cur)
|
|
break;
|
|
read_iter_.MoveNext();
|
|
} // for(;;)
|
|
PERFETTO_DCHECK(read_iter_.cur == it.cur);
|
|
|
|
if (PERFETTO_UNLIKELY(packet_corruption)) {
|
|
// ReadNextPacketInChunk() already records an abi violation for this case.
|
|
*packet = TracePacket(); // clear.
|
|
return ReadAheadResult::kFailedStayOnSameSequence;
|
|
}
|
|
|
|
return ReadAheadResult::kSucceededReturnSlices;
|
|
} // for(it...) [readahead loop]
|
|
return ReadAheadResult::kFailedMoveToNextSequence;
|
|
}
|
|
|
|
TraceBuffer::ReadPacketResult TraceBuffer::ReadNextPacketInChunk(
|
|
ChunkMeta* chunk_meta,
|
|
TracePacket* packet) {
|
|
PERFETTO_DCHECK(chunk_meta->num_fragments_read < chunk_meta->num_fragments);
|
|
PERFETTO_DCHECK(!(chunk_meta->flags & kChunkNeedsPatching));
|
|
|
|
const uint8_t* record_begin =
|
|
reinterpret_cast<const uint8_t*>(chunk_meta->chunk_record);
|
|
const uint8_t* record_end = record_begin + chunk_meta->chunk_record->size;
|
|
const uint8_t* packets_begin = record_begin + sizeof(ChunkRecord);
|
|
const uint8_t* packet_begin = packets_begin + chunk_meta->cur_fragment_offset;
|
|
|
|
if (PERFETTO_UNLIKELY(packet_begin < packets_begin ||
|
|
packet_begin >= record_end)) {
|
|
    // The producer has a bug or is malicious and declared that the chunk
    // contains packets beyond its boundaries.
|
|
stats_.set_abi_violations(stats_.abi_violations() + 1);
|
|
PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
|
|
chunk_meta->cur_fragment_offset = 0;
|
|
chunk_meta->num_fragments_read = chunk_meta->num_fragments;
|
|
if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
|
|
stats_.set_chunks_read(stats_.chunks_read() + 1);
|
|
stats_.set_bytes_read(stats_.bytes_read() +
|
|
chunk_meta->chunk_record->size);
|
|
}
|
|
return ReadPacketResult::kFailedInvalidPacket;
|
|
}
|
|
|
|
  // A packet (or a fragment) starts with a varint stating its size, followed
  // by its content. The varint shouldn't be larger than 4 bytes (just in case
  // the producer is using a redundant encoding).
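  // For example (illustrative): a 300-byte fragment is encoded as the two
  // varint bytes {0xAC, 0x02} followed by the 300 bytes of payload.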
|
|
uint64_t packet_size = 0;
|
|
const uint8_t* header_end =
|
|
std::min(packet_begin + protozero::proto_utils::kMessageLengthFieldSize,
|
|
record_end);
|
|
const uint8_t* packet_data = protozero::proto_utils::ParseVarInt(
|
|
packet_begin, header_end, &packet_size);
|
|
|
|
const uint8_t* next_packet = packet_data + packet_size;
|
|
if (PERFETTO_UNLIKELY(next_packet <= packet_begin ||
|
|
next_packet > record_end)) {
|
|
// In BufferExhaustedPolicy::kDrop mode, TraceWriter may abort a fragmented
|
|
// packet by writing an invalid size in the last fragment's header. We
|
|
// should handle this case without recording an ABI violation (since Android
|
|
// R).
|
|
if (packet_size != SharedMemoryABI::kPacketSizeDropPacket) {
|
|
stats_.set_abi_violations(stats_.abi_violations() + 1);
|
|
PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
|
|
} else {
|
|
stats_.set_trace_writer_packet_loss(stats_.trace_writer_packet_loss() +
|
|
1);
|
|
}
|
|
chunk_meta->cur_fragment_offset = 0;
|
|
chunk_meta->num_fragments_read = chunk_meta->num_fragments;
|
|
if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
|
|
stats_.set_chunks_read(stats_.chunks_read() + 1);
|
|
stats_.set_bytes_read(stats_.bytes_read() +
|
|
chunk_meta->chunk_record->size);
|
|
}
|
|
return ReadPacketResult::kFailedInvalidPacket;
|
|
}
|
|
|
|
chunk_meta->cur_fragment_offset =
|
|
static_cast<uint16_t>(next_packet - packets_begin);
|
|
chunk_meta->num_fragments_read++;
|
|
|
|
if (PERFETTO_UNLIKELY(chunk_meta->num_fragments_read ==
|
|
chunk_meta->num_fragments &&
|
|
chunk_meta->is_complete())) {
|
|
stats_.set_chunks_read(stats_.chunks_read() + 1);
|
|
stats_.set_bytes_read(stats_.bytes_read() + chunk_meta->chunk_record->size);
|
|
} else {
|
|
// We have at least one more packet to parse. It should be within the chunk.
|
|
if (chunk_meta->cur_fragment_offset + sizeof(ChunkRecord) >=
|
|
chunk_meta->chunk_record->size) {
|
|
PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
|
|
}
|
|
}
|
|
|
|
chunk_meta->set_last_read_packet_skipped(false);
|
|
|
|
if (PERFETTO_UNLIKELY(packet_size == 0))
|
|
return ReadPacketResult::kFailedEmptyPacket;
|
|
|
|
if (PERFETTO_LIKELY(packet))
|
|
packet->AddSlice(packet_data, static_cast<size_t>(packet_size));
|
|
|
|
return ReadPacketResult::kSucceeded;
|
|
}
|
|
|
|
void TraceBuffer::DiscardWrite() {
|
|
PERFETTO_DCHECK(overwrite_policy_ == kDiscard);
|
|
discard_writes_ = true;
|
|
stats_.set_chunks_discarded(stats_.chunks_discarded() + 1);
|
|
TRACE_BUFFER_DLOG(" discarding write");
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/tracing_service_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/core/tracing_service_impl.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/circular_queue.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
|
|
|
|
#include <stdint.h>
|
|
#include <iterator>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// CircularQueue is a push-back-only / pop-front-only queue with the following
|
|
// characteristics:
|
|
// - The storage is based on a flat circular buffer. Beginning and end wrap
|
|
// as necessary, to keep pushes and pops O(1) as long as capacity expansion is
|
|
// not required.
|
|
// - Capacity is automatically expanded like in a std::vector. Expansion has
//   an O(N) cost.
|
|
// - It allows random access, allowing in-place std::sort.
|
|
// - Iterators are not stable. Mutating the container invalidates all iterators.
|
|
// - It doesn't bother with const-correctness.
|
|
//
|
|
// Implementation details:
|
|
// Internally, |begin|, |end| and iterators use 64-bit monotonic indexes, which
|
|
// are incremented as if the queue was backed by unlimited storage.
|
|
// Even assuming that elements are inserted and removed every nanosecond, 64 bit
|
|
// is enough for 584 years.
|
|
// Wrapping happens only when addressing elements in the underlying circular
// storage. This limits the complexity and avoids having to deal with modular
// arithmetic all over the place.
|
|
template <class T>
|
|
class CircularQueue {
|
|
public:
|
|
class Iterator {
|
|
public:
|
|
using difference_type = ptrdiff_t;
|
|
using value_type = T;
|
|
using pointer = T*;
|
|
using reference = T&;
|
|
using iterator_category = std::random_access_iterator_tag;
|
|
|
|
Iterator(CircularQueue* queue, uint64_t pos, uint32_t generation)
|
|
: queue_(queue),
|
|
pos_(pos)
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
,
|
|
generation_(generation)
|
|
#endif
|
|
{
|
|
ignore_result(generation);
|
|
}
|
|
|
|
T* operator->() {
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
PERFETTO_DCHECK(generation_ == queue_->generation());
|
|
#endif
|
|
return queue_->Get(pos_);
|
|
}
|
|
|
|
const T* operator->() const {
|
|
return const_cast<CircularQueue<T>::Iterator*>(this)->operator->();
|
|
}
|
|
|
|
T& operator*() { return *(operator->()); }
|
|
const T& operator*() const { return *(operator->()); }
|
|
|
|
value_type& operator[](difference_type i) { return *(*this + i); }
|
|
|
|
const value_type& operator[](difference_type i) const {
|
|
return const_cast<CircularQueue<T>::Iterator&>(*this)[i];
|
|
}
|
|
|
|
Iterator& operator++() {
|
|
Add(1);
|
|
return *this;
|
|
}
|
|
|
|
Iterator operator++(int) {
|
|
Iterator ret = *this;
|
|
Add(1);
|
|
return ret;
|
|
}
|
|
|
|
Iterator& operator--() {
|
|
Add(-1);
|
|
return *this;
|
|
}
|
|
|
|
Iterator operator--(int) {
|
|
Iterator ret = *this;
|
|
Add(-1);
|
|
return ret;
|
|
}
|
|
|
|
friend Iterator operator+(const Iterator& iter, difference_type offset) {
|
|
Iterator ret = iter;
|
|
ret.Add(offset);
|
|
return ret;
|
|
}
|
|
|
|
Iterator& operator+=(difference_type offset) {
|
|
Add(offset);
|
|
return *this;
|
|
}
|
|
|
|
friend Iterator operator-(const Iterator& iter, difference_type offset) {
|
|
Iterator ret = iter;
|
|
ret.Add(-offset);
|
|
return ret;
|
|
}
|
|
|
|
Iterator& operator-=(difference_type offset) {
|
|
Add(-offset);
|
|
return *this;
|
|
}
|
|
|
|
friend ptrdiff_t operator-(const Iterator& lhs, const Iterator& rhs) {
|
|
return static_cast<ptrdiff_t>(lhs.pos_) -
|
|
static_cast<ptrdiff_t>(rhs.pos_);
|
|
}
|
|
|
|
friend bool operator==(const Iterator& lhs, const Iterator& rhs) {
|
|
return lhs.pos_ == rhs.pos_;
|
|
}
|
|
|
|
friend bool operator!=(const Iterator& lhs, const Iterator& rhs) {
|
|
return lhs.pos_ != rhs.pos_;
|
|
}
|
|
|
|
friend bool operator<(const Iterator& lhs, const Iterator& rhs) {
|
|
return lhs.pos_ < rhs.pos_;
|
|
}
|
|
|
|
friend bool operator<=(const Iterator& lhs, const Iterator& rhs) {
|
|
return lhs.pos_ <= rhs.pos_;
|
|
}
|
|
|
|
friend bool operator>(const Iterator& lhs, const Iterator& rhs) {
|
|
return lhs.pos_ > rhs.pos_;
|
|
}
|
|
|
|
friend bool operator>=(const Iterator& lhs, const Iterator& rhs) {
|
|
return lhs.pos_ >= rhs.pos_;
|
|
}
|
|
|
|
private:
|
|
inline void Add(difference_type offset) {
|
|
pos_ = static_cast<uint64_t>(static_cast<difference_type>(pos_) + offset);
|
|
PERFETTO_DCHECK(pos_ <= queue_->end_);
|
|
}
|
|
|
|
CircularQueue* queue_;
|
|
uint64_t pos_;
|
|
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
uint32_t generation_;
|
|
#endif
|
|
};
|
|
|
|
CircularQueue(size_t initial_capacity = 1024) { Grow(initial_capacity); }
|
|
|
|
CircularQueue(CircularQueue&& other) noexcept {
|
|
// Copy all fields using the (private) default copy assignment operator.
|
|
*this = other;
|
|
increment_generation();
|
|
new (&other) CircularQueue(); // Reset the old queue so it's still usable.
|
|
}
|
|
|
|
CircularQueue& operator=(CircularQueue&& other) {
|
|
this->~CircularQueue(); // Destroy the current state.
|
|
new (this) CircularQueue(std::move(other)); // Use the move ctor above.
|
|
return *this;
|
|
}
|
|
|
|
~CircularQueue() {
|
|
if (!entries_) {
|
|
PERFETTO_DCHECK(empty());
|
|
return;
|
|
}
|
|
clear(); // Invoke destructors on all alive entries.
|
|
PERFETTO_DCHECK(empty());
|
|
free(entries_);
|
|
}
|
|
|
|
template <typename... Args>
|
|
void emplace_back(Args&&... args) {
|
|
increment_generation();
|
|
if (PERFETTO_UNLIKELY(size() >= capacity_))
|
|
Grow();
|
|
T* slot = Get(end_++);
|
|
new (slot) T(std::forward<Args>(args)...);
|
|
}
|
|
|
|
void erase_front(size_t n) {
|
|
increment_generation();
|
|
for (; n && (begin_ < end_); --n) {
|
|
Get(begin_)->~T();
|
|
begin_++; // This needs to be its own statement, Get() checks begin_.
|
|
}
|
|
}
|
|
|
|
void pop_front() { erase_front(1); }
|
|
|
|
void clear() { erase_front(size()); }
|
|
|
|
T& at(size_t idx) {
|
|
PERFETTO_DCHECK(idx < size());
|
|
return *Get(begin_ + idx);
|
|
}
|
|
|
|
Iterator begin() { return Iterator(this, begin_, generation()); }
|
|
Iterator end() { return Iterator(this, end_, generation()); }
|
|
T& front() { return *begin(); }
|
|
T& back() { return *(end() - 1); }
|
|
|
|
bool empty() const { return size() == 0; }
|
|
|
|
size_t size() const {
|
|
PERFETTO_DCHECK(end_ - begin_ <= capacity_);
|
|
return static_cast<size_t>(end_ - begin_);
|
|
}
|
|
|
|
size_t capacity() const { return capacity_; }
|
|
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
uint32_t generation() const { return generation_; }
|
|
void increment_generation() { ++generation_; }
|
|
#else
|
|
uint32_t generation() const { return 0; }
|
|
void increment_generation() {}
|
|
#endif
|
|
|
|
private:
|
|
CircularQueue(const CircularQueue&) = delete;
|
|
CircularQueue& operator=(const CircularQueue&) = default;
|
|
|
|
void Grow(size_t new_capacity = 0) {
|
|
    // Capacity must always be a power of two. This allows Get() to use a
    // simple bitwise-AND for handling the wrapping instead of a full division.
|
|
new_capacity = new_capacity ? new_capacity : capacity_ * 2;
|
|
PERFETTO_CHECK((new_capacity & (new_capacity - 1)) == 0); // Must be pow2.
|
|
|
|
// On 32-bit systems this might hit the 4GB wall and overflow. We can't do
|
|
// anything other than crash in this case.
|
|
PERFETTO_CHECK(new_capacity > capacity_);
|
|
size_t malloc_size = new_capacity * sizeof(T);
|
|
PERFETTO_CHECK(malloc_size > new_capacity);
|
|
auto* new_vec = static_cast<T*>(malloc(malloc_size));
|
|
|
|
// Move all elements in the expanded array.
|
|
size_t new_size = 0;
|
|
for (uint64_t i = begin_; i < end_; i++)
|
|
new (&new_vec[new_size++]) T(std::move(*Get(i))); // Placement move ctor.
|
|
|
|
// Even if all the elements are std::move()-d and likely empty, we are still
|
|
// required to call the dtor for them.
|
|
for (uint64_t i = begin_; i < end_; i++)
|
|
Get(i)->~T();
|
|
free(entries_); // It's fine to free(nullptr) (for the ctor call case).
|
|
|
|
begin_ = 0;
|
|
end_ = new_size;
|
|
capacity_ = new_capacity;
|
|
entries_ = new_vec;
|
|
}
|
|
|
|
inline T* Get(uint64_t pos) {
|
|
PERFETTO_DCHECK(pos >= begin_ && pos < end_);
|
|
PERFETTO_DCHECK((capacity_ & (capacity_ - 1)) == 0); // Must be a pow2.
|
|
auto index = static_cast<size_t>(pos & (capacity_ - 1));
|
|
return &entries_[index];
|
|
}
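  // Example for Get() above (illustrative): with |capacity_| == 8, the
  // monotonic position 11 maps to slot 11 & 7 == 3 of |entries_|, i.e.
  // positions simply wrap around the power-of-two sized storage.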
|
|
|
|
// Underlying storage. It's raw malloc-ed rather than being a unique_ptr<T[]>
|
|
// to allow having uninitialized entries inside it.
|
|
T* entries_ = nullptr;
|
|
size_t capacity_ = 0; // Number of allocated slots (NOT bytes) in |entries_|.
|
|
|
|
// The |begin_| and |end_| indexes are monotonic and never wrap. Modular arith
|
|
// is used only when dereferencing entries in the vector.
|
|
uint64_t begin_ = 0;
|
|
uint64_t end_ = 0;
|
|
|
|
// Generation is used in debug builds only for checking iterator validity.
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
uint32_t generation_ = 0;
|
|
#endif
|
|
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
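
// Illustrative usage sketch for the CircularQueue defined above. This helper
// is not part of the upstream Perfetto API: the function name is made up for
// documentation purposes only and nothing in this file calls it.
namespace perfetto {
namespace base {

inline void CircularQueueUsageSketchForDocs() {
  CircularQueue<int> queue(/*initial_capacity=*/4);
  for (int i = 0; i < 6; i++)
    queue.emplace_back(i);  // The 5th push grows capacity from 4 to 8 slots.
  PERFETTO_DCHECK(queue.size() == 6);
  PERFETTO_DCHECK(queue.capacity() == 8);
  PERFETTO_DCHECK(queue.front() == 0 && queue.back() == 5);

  // pop_front() destroys the oldest element (0) and bumps the generation,
  // which invalidates any previously obtained iterator.
  queue.pop_front();

  int sum = 0;
  for (auto it = queue.begin(); it != queue.end(); ++it)
    sum += *it;  // Iterators are random access; a simple forward walk here.
  PERFETTO_DCHECK(sum == 1 + 2 + 3 + 4 + 5);
}

}  // namespace base
}  // namespace perfetto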
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_TRACING_SERVICE_IMPL_H_
|
|
#define SRC_TRACING_CORE_TRACING_SERVICE_IMPL_H_
|
|
|
|
#include <algorithm>
|
|
#include <functional>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <mutex>
|
|
#include <set>
|
|
#include <utility>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/circular_queue.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/optional.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/id_allocator.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
class Consumer;
|
|
class Producer;
|
|
class SharedMemory;
|
|
class SharedMemoryArbiterImpl;
|
|
class TraceBuffer;
|
|
class TracePacket;
|
|
|
|
// The tracing service business logic.
|
|
class TracingServiceImpl : public TracingService {
|
|
private:
|
|
struct DataSourceInstance;
|
|
|
|
public:
|
|
static constexpr size_t kDefaultShmPageSize = 4096ul;
|
|
static constexpr size_t kDefaultShmSize = 256 * 1024ul;
|
|
static constexpr size_t kMaxShmSize = 32 * 1024 * 1024ul;
|
|
static constexpr uint32_t kDataSourceStopTimeoutMs = 5000;
|
|
static constexpr uint8_t kSyncMarker[] = {0x82, 0x47, 0x7a, 0x76, 0xb2, 0x8d,
|
|
0x42, 0xba, 0x81, 0xdc, 0x33, 0x32,
|
|
0x6d, 0x57, 0xa0, 0x79};
|
|
|
|
// The implementation behind the service endpoint exposed to each producer.
|
|
class ProducerEndpointImpl : public TracingService::ProducerEndpoint {
|
|
public:
|
|
ProducerEndpointImpl(ProducerID,
|
|
uid_t uid,
|
|
TracingServiceImpl*,
|
|
base::TaskRunner*,
|
|
Producer*,
|
|
const std::string& producer_name,
|
|
bool in_process,
|
|
bool smb_scraping_enabled);
|
|
~ProducerEndpointImpl() override;
|
|
|
|
// TracingService::ProducerEndpoint implementation.
|
|
void RegisterDataSource(const DataSourceDescriptor&) override;
|
|
void UnregisterDataSource(const std::string& name) override;
|
|
void RegisterTraceWriter(uint32_t writer_id,
|
|
uint32_t target_buffer) override;
|
|
void UnregisterTraceWriter(uint32_t writer_id) override;
|
|
void CommitData(const CommitDataRequest&, CommitDataCallback) override;
|
|
void SetupSharedMemory(std::unique_ptr<SharedMemory>,
|
|
size_t page_size_bytes,
|
|
bool provided_by_producer);
|
|
std::unique_ptr<TraceWriter> CreateTraceWriter(
|
|
BufferID,
|
|
BufferExhaustedPolicy) override;
|
|
SharedMemoryArbiter* MaybeSharedMemoryArbiter() override;
|
|
bool IsShmemProvidedByProducer() const override;
|
|
void NotifyFlushComplete(FlushRequestID) override;
|
|
void NotifyDataSourceStarted(DataSourceInstanceID) override;
|
|
void NotifyDataSourceStopped(DataSourceInstanceID) override;
|
|
SharedMemory* shared_memory() const override;
|
|
size_t shared_buffer_page_size_kb() const override;
|
|
void ActivateTriggers(const std::vector<std::string>&) override;
|
|
void Sync(std::function<void()> callback) override;
|
|
|
|
void OnTracingSetup();
|
|
void SetupDataSource(DataSourceInstanceID, const DataSourceConfig&);
|
|
void StartDataSource(DataSourceInstanceID, const DataSourceConfig&);
|
|
void StopDataSource(DataSourceInstanceID);
|
|
void Flush(FlushRequestID, const std::vector<DataSourceInstanceID>&);
|
|
void OnFreeBuffers(const std::vector<BufferID>& target_buffers);
|
|
void ClearIncrementalState(const std::vector<DataSourceInstanceID>&);
|
|
|
|
bool is_allowed_target_buffer(BufferID buffer_id) const {
|
|
return allowed_target_buffers_.count(buffer_id);
|
|
}
|
|
|
|
base::Optional<BufferID> buffer_id_for_writer(WriterID writer_id) const {
|
|
const auto it = writers_.find(writer_id);
|
|
if (it != writers_.end())
|
|
return it->second;
|
|
return base::nullopt;
|
|
}
|
|
|
|
uid_t uid() const { return uid_; }
|
|
|
|
private:
|
|
friend class TracingServiceImpl;
|
|
friend class TracingServiceImplTest;
|
|
friend class TracingIntegrationTest;
|
|
ProducerEndpointImpl(const ProducerEndpointImpl&) = delete;
|
|
ProducerEndpointImpl& operator=(const ProducerEndpointImpl&) = delete;
|
|
|
|
ProducerID const id_;
|
|
const uid_t uid_;
|
|
TracingServiceImpl* const service_;
|
|
base::TaskRunner* const task_runner_;
|
|
Producer* producer_;
|
|
std::unique_ptr<SharedMemory> shared_memory_;
|
|
size_t shared_buffer_page_size_kb_ = 0;
|
|
SharedMemoryABI shmem_abi_;
|
|
size_t shmem_size_hint_bytes_ = 0;
|
|
size_t shmem_page_size_hint_bytes_ = 0;
|
|
bool is_shmem_provided_by_producer_ = false;
|
|
const std::string name_;
|
|
bool in_process_;
|
|
bool smb_scraping_enabled_;
|
|
|
|
// Set of the global target_buffer IDs that the producer is configured to
|
|
// write into in any active tracing session.
|
|
std::set<BufferID> allowed_target_buffers_;
|
|
|
|
// Maps registered TraceWriter IDs to their target buffers as registered by
|
|
// the producer. Note that producers aren't required to register their
|
|
// writers, so we may see commits of chunks with WriterIDs that aren't
|
|
// contained in this map. However, if a producer does register a writer, the
|
|
// service will prevent the writer from writing into any other buffer than
|
|
// the one associated with it here. The BufferIDs stored in this map are
|
|
    // untrusted, so they need to be verified against |allowed_target_buffers_|
    // before use.
|
|
std::map<WriterID, BufferID> writers_;
|
|
|
|
// This is used only in in-process configurations.
|
|
// SharedMemoryArbiterImpl methods themselves are thread-safe.
|
|
std::unique_ptr<SharedMemoryArbiterImpl> inproc_shmem_arbiter_;
|
|
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
base::WeakPtrFactory<ProducerEndpointImpl> weak_ptr_factory_; // Keep last.
|
|
};
|
|
|
|
// The implementation behind the service endpoint exposed to each consumer.
|
|
class ConsumerEndpointImpl : public TracingService::ConsumerEndpoint {
|
|
public:
|
|
ConsumerEndpointImpl(TracingServiceImpl*,
|
|
base::TaskRunner*,
|
|
Consumer*,
|
|
uid_t uid);
|
|
~ConsumerEndpointImpl() override;
|
|
|
|
void NotifyOnTracingDisabled();
|
|
base::WeakPtr<ConsumerEndpointImpl> GetWeakPtr();
|
|
|
|
// TracingService::ConsumerEndpoint implementation.
|
|
void EnableTracing(const TraceConfig&, base::ScopedFile) override;
|
|
void ChangeTraceConfig(const TraceConfig& cfg) override;
|
|
void StartTracing() override;
|
|
void DisableTracing() override;
|
|
void ReadBuffers() override;
|
|
void FreeBuffers() override;
|
|
void Flush(uint32_t timeout_ms, FlushCallback) override;
|
|
void Detach(const std::string& key) override;
|
|
void Attach(const std::string& key) override;
|
|
void GetTraceStats() override;
|
|
void ObserveEvents(uint32_t enabled_event_types) override;
|
|
void QueryServiceState(QueryServiceStateCallback) override;
|
|
void QueryCapabilities(QueryCapabilitiesCallback) override;
|
|
|
|
// Will queue a task to notify the consumer about the state change.
|
|
void OnDataSourceInstanceStateChange(const ProducerEndpointImpl&,
|
|
const DataSourceInstance&);
|
|
void OnAllDataSourcesStarted();
|
|
|
|
private:
|
|
friend class TracingServiceImpl;
|
|
ConsumerEndpointImpl(const ConsumerEndpointImpl&) = delete;
|
|
ConsumerEndpointImpl& operator=(const ConsumerEndpointImpl&) = delete;
|
|
|
|
// Returns a pointer to an ObservableEvents object that the caller can fill
|
|
// and schedules a task to send the ObservableEvents to the consumer.
|
|
ObservableEvents* AddObservableEvents();
|
|
|
|
base::TaskRunner* const task_runner_;
|
|
TracingServiceImpl* const service_;
|
|
Consumer* const consumer_;
|
|
uid_t const uid_;
|
|
TracingSessionID tracing_session_id_ = 0;
|
|
|
|
// Whether the consumer is interested in DataSourceInstance state change
|
|
// events.
|
|
uint32_t observable_events_mask_ = 0;
|
|
|
|
// ObservableEvents that will be sent to the consumer. If set, a task to
|
|
// flush the events to the consumer has been queued.
|
|
std::unique_ptr<ObservableEvents> observable_events_;
|
|
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
base::WeakPtrFactory<ConsumerEndpointImpl> weak_ptr_factory_; // Keep last.
|
|
};
|
|
|
|
explicit TracingServiceImpl(std::unique_ptr<SharedMemory::Factory>,
|
|
base::TaskRunner*);
|
|
~TracingServiceImpl() override;
|
|
|
|
// Called by ProducerEndpointImpl.
|
|
void DisconnectProducer(ProducerID);
|
|
void RegisterDataSource(ProducerID, const DataSourceDescriptor&);
|
|
void UnregisterDataSource(ProducerID, const std::string& name);
|
|
void CopyProducerPageIntoLogBuffer(ProducerID,
|
|
uid_t,
|
|
WriterID,
|
|
ChunkID,
|
|
BufferID,
|
|
uint16_t num_fragments,
|
|
uint8_t chunk_flags,
|
|
bool chunk_complete,
|
|
const uint8_t* src,
|
|
size_t size);
|
|
void ApplyChunkPatches(ProducerID,
|
|
const std::vector<CommitDataRequest::ChunkToPatch>&);
|
|
void NotifyFlushDoneForProducer(ProducerID, FlushRequestID);
|
|
void NotifyDataSourceStarted(ProducerID, const DataSourceInstanceID);
|
|
void NotifyDataSourceStopped(ProducerID, const DataSourceInstanceID);
|
|
void ActivateTriggers(ProducerID, const std::vector<std::string>& triggers);
|
|
|
|
// Called by ConsumerEndpointImpl.
|
|
bool DetachConsumer(ConsumerEndpointImpl*, const std::string& key);
|
|
bool AttachConsumer(ConsumerEndpointImpl*, const std::string& key);
|
|
void DisconnectConsumer(ConsumerEndpointImpl*);
|
|
bool EnableTracing(ConsumerEndpointImpl*,
|
|
const TraceConfig&,
|
|
base::ScopedFile);
|
|
void ChangeTraceConfig(ConsumerEndpointImpl*, const TraceConfig&);
|
|
|
|
bool StartTracing(TracingSessionID);
|
|
void DisableTracing(TracingSessionID, bool disable_immediately = false);
|
|
void Flush(TracingSessionID tsid,
|
|
uint32_t timeout_ms,
|
|
ConsumerEndpoint::FlushCallback);
|
|
void FlushAndDisableTracing(TracingSessionID);
|
|
bool ReadBuffers(TracingSessionID, ConsumerEndpointImpl*);
|
|
void FreeBuffers(TracingSessionID);
|
|
|
|
// Service implementation.
|
|
std::unique_ptr<TracingService::ProducerEndpoint> ConnectProducer(
|
|
Producer*,
|
|
uid_t uid,
|
|
const std::string& producer_name,
|
|
size_t shared_memory_size_hint_bytes = 0,
|
|
bool in_process = false,
|
|
ProducerSMBScrapingMode smb_scraping_mode =
|
|
ProducerSMBScrapingMode::kDefault,
|
|
size_t shared_memory_page_size_hint_bytes = 0,
|
|
std::unique_ptr<SharedMemory> shm = nullptr) override;
|
|
|
|
std::unique_ptr<TracingService::ConsumerEndpoint> ConnectConsumer(
|
|
Consumer*,
|
|
uid_t) override;
|
|
|
|
// Set whether SMB scraping should be enabled by default or not. Producers can
|
|
// override this setting for their own SMBs.
|
|
void SetSMBScrapingEnabled(bool enabled) override {
|
|
smb_scraping_enabled_ = enabled;
|
|
}
|
|
|
|
// Exposed mainly for testing.
|
|
size_t num_producers() const { return producers_.size(); }
|
|
ProducerEndpointImpl* GetProducer(ProducerID) const;
|
|
|
|
private:
|
|
friend class TracingServiceImplTest;
|
|
friend class TracingIntegrationTest;
|
|
|
|
struct RegisteredDataSource {
|
|
ProducerID producer_id;
|
|
DataSourceDescriptor descriptor;
|
|
};
|
|
|
|
// Represents an active data source for a tracing session.
|
|
struct DataSourceInstance {
|
|
DataSourceInstance(DataSourceInstanceID id,
|
|
const DataSourceConfig& cfg,
|
|
const std::string& ds_name,
|
|
bool notify_on_start,
|
|
bool notify_on_stop,
|
|
bool handles_incremental_state_invalidation)
|
|
: instance_id(id),
|
|
config(cfg),
|
|
data_source_name(ds_name),
|
|
will_notify_on_start(notify_on_start),
|
|
will_notify_on_stop(notify_on_stop),
|
|
handles_incremental_state_clear(
|
|
handles_incremental_state_invalidation) {}
|
|
DataSourceInstance(const DataSourceInstance&) = delete;
|
|
DataSourceInstance& operator=(const DataSourceInstance&) = delete;
|
|
|
|
DataSourceInstanceID instance_id;
|
|
DataSourceConfig config;
|
|
std::string data_source_name;
|
|
bool will_notify_on_start;
|
|
bool will_notify_on_stop;
|
|
bool handles_incremental_state_clear;
|
|
|
|
enum DataSourceInstanceState {
|
|
CONFIGURED,
|
|
STARTING,
|
|
STARTED,
|
|
STOPPING,
|
|
STOPPED
|
|
};
|
|
DataSourceInstanceState state = CONFIGURED;
|
|
};
|
|
|
|
struct PendingFlush {
|
|
std::set<ProducerID> producers;
|
|
ConsumerEndpoint::FlushCallback callback;
|
|
explicit PendingFlush(decltype(callback) cb) : callback(std::move(cb)) {}
|
|
};
|
|
|
|
  // Holds the state of a tracing session. A tracing session is uniquely bound
  // to a specific Consumer. Each Consumer can own one or more sessions.
|
|
struct TracingSession {
|
|
enum State {
|
|
DISABLED = 0,
|
|
CONFIGURED,
|
|
STARTED,
|
|
DISABLING_WAITING_STOP_ACKS
|
|
};
|
|
|
|
TracingSession(TracingSessionID, ConsumerEndpointImpl*, const TraceConfig&);
|
|
|
|
size_t num_buffers() const { return buffers_index.size(); }
|
|
|
|
uint32_t delay_to_next_write_period_ms() const {
|
|
PERFETTO_DCHECK(write_period_ms > 0);
|
|
return write_period_ms -
|
|
(base::GetWallTimeMs().count() % write_period_ms);
|
|
}
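    // Example for delay_to_next_write_period_ms() above (illustrative): with
    // |write_period_ms| == 5000 and a wall clock reading of 12300 ms, the
    // next write is scheduled in 5000 - (12300 % 5000) = 2700 ms, keeping
    // periodic writes aligned to multiples of the period.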
|
|
|
|
uint32_t flush_timeout_ms() {
|
|
uint32_t timeout_ms = config.flush_timeout_ms();
|
|
return timeout_ms ? timeout_ms : kDefaultFlushTimeoutMs;
|
|
}
|
|
|
|
uint32_t data_source_stop_timeout_ms() {
|
|
uint32_t timeout_ms = config.data_source_stop_timeout_ms();
|
|
return timeout_ms ? timeout_ms : kDataSourceStopTimeoutMs;
|
|
}
|
|
|
|
PacketSequenceID GetPacketSequenceID(ProducerID producer_id,
|
|
WriterID writer_id) {
|
|
auto key = std::make_pair(producer_id, writer_id);
|
|
auto it = packet_sequence_ids.find(key);
|
|
if (it != packet_sequence_ids.end())
|
|
return it->second;
|
|
// We shouldn't run out of sequence IDs (producer ID is 16 bit, writer IDs
|
|
// are limited to 1024).
|
|
static_assert(kMaxPacketSequenceID > kMaxProducerID * kMaxWriterID,
|
|
"PacketSequenceID value space doesn't cover service "
|
|
"sequence ID and all producer/writer ID combinations!");
|
|
PERFETTO_DCHECK(last_packet_sequence_id < kMaxPacketSequenceID);
|
|
PacketSequenceID sequence_id = ++last_packet_sequence_id;
|
|
packet_sequence_ids[key] = sequence_id;
|
|
return sequence_id;
|
|
}
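    // Illustrative arithmetic for the static_assert above: with 16-bit
    // producer IDs (at most 65535) and writer IDs limited to 1024 per
    // producer, there are at most 65535 * 1024 (~2^26) {producer, writer}
    // combinations, well within the PacketSequenceID value space.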
|
|
|
|
DataSourceInstance* GetDataSourceInstance(
|
|
ProducerID producer_id,
|
|
DataSourceInstanceID instance_id) {
|
|
for (auto& inst_kv : data_source_instances) {
|
|
if (inst_kv.first != producer_id ||
|
|
inst_kv.second.instance_id != instance_id) {
|
|
continue;
|
|
}
|
|
return &inst_kv.second;
|
|
}
|
|
return nullptr;
|
|
}
|
|
|
|
bool AllDataSourceInstancesStarted() {
|
|
return std::all_of(
|
|
data_source_instances.begin(), data_source_instances.end(),
|
|
[](decltype(data_source_instances)::const_reference x) {
|
|
return x.second.state == DataSourceInstance::STARTED;
|
|
});
|
|
}
|
|
|
|
bool AllDataSourceInstancesStopped() {
|
|
return std::all_of(
|
|
data_source_instances.begin(), data_source_instances.end(),
|
|
[](decltype(data_source_instances)::const_reference x) {
|
|
return x.second.state == DataSourceInstance::STOPPED;
|
|
});
|
|
}
|
|
|
|
const TracingSessionID id;
|
|
|
|
// The consumer that started the session.
|
|
// Can be nullptr if the consumer detached from the session.
|
|
ConsumerEndpointImpl* consumer_maybe_null;
|
|
|
|
    // Unix uid of the consumer. This is valid even after the consumer detaches
    // and does not change for the entire duration of the session. It is used
    // to prevent a consumer with a different uid from re-attaching to the
    // session.
|
|
uid_t const consumer_uid;
|
|
|
|
// The list of triggers this session received while alive and the time they
|
|
// were received at. This is used to insert 'fake' packets back to the
|
|
// consumer so they can tell when some event happened. The order matches the
|
|
// order they were received.
|
|
struct TriggerInfo {
|
|
uint64_t boot_time_ns;
|
|
std::string trigger_name;
|
|
std::string producer_name;
|
|
uid_t producer_uid;
|
|
};
|
|
std::vector<TriggerInfo> received_triggers;
|
|
|
|
// The trace config provided by the Consumer when calling
|
|
// EnableTracing(), plus any updates performed by ChangeTraceConfig.
|
|
TraceConfig config;
|
|
|
|
// List of data source instances that have been enabled on the various
|
|
// producers for this tracing session.
|
|
// TODO(rsavitski): at the time of writing, the map structure is unused
|
|
// (even when the calling code has a key). This is also an opportunity to
|
|
// consider an alternative data type, e.g. a map of vectors.
|
|
std::multimap<ProducerID, DataSourceInstance> data_source_instances;
|
|
|
|
// For each Flush(N) request, keeps track of the set of producers for which
|
|
// we are still awaiting a NotifyFlushComplete(N) ack.
|
|
std::map<FlushRequestID, PendingFlush> pending_flushes;
|
|
|
|
// Maps a per-trace-session buffer index into the corresponding global
|
|
// BufferID (shared namespace amongst all consumers). This vector has as
|
|
// many entries as |config.buffers_size()|.
|
|
std::vector<BufferID> buffers_index;
|
|
|
|
std::map<std::pair<ProducerID, WriterID>, PacketSequenceID>
|
|
packet_sequence_ids;
|
|
PacketSequenceID last_packet_sequence_id = kServicePacketSequenceID;
|
|
|
|
// Whether we should emit the trace stats next time we reach EOF while
|
|
// performing ReadBuffers.
|
|
bool should_emit_stats = false;
|
|
|
|
// Whether we should emit the sync marker the next time ReadBuffers() is
|
|
// called.
|
|
bool should_emit_sync_marker = false;
|
|
|
|
// Whether we mirrored the trace config back to the trace output yet.
|
|
bool did_emit_config = false;
|
|
|
|
// Whether we put the system info into the trace output yet.
|
|
bool did_emit_system_info = false;
|
|
|
|
// The number of received triggers we've emitted into the trace output.
|
|
size_t num_triggers_emitted_into_trace = 0;
|
|
|
|
// Packets that failed validation of the TrustedPacket.
|
|
uint64_t invalid_packets = 0;
|
|
|
|
// Set to true on the first call to MaybeNotifyAllDataSourcesStarted().
|
|
bool did_notify_all_data_source_started = false;
|
|
|
|
// Stores all lifecycle events of a particular type (i.e. associated with a
|
|
// single field id in the TracingServiceEvent proto).
|
|
struct LifecycleEvent {
|
|
LifecycleEvent(uint32_t f_id, uint32_t m_size = 1)
|
|
: field_id(f_id), max_size(m_size), timestamps(m_size) {}
|
|
|
|
// The field id of the event in the TracingServiceEvent proto.
|
|
uint32_t field_id;
|
|
|
|
      // Stores the max size of |timestamps|. Set to 1 by default (in
      // the constructor) but can be overridden in the TracingSession
      // constructor if a larger size is required.
|
|
uint32_t max_size;
|
|
|
|
// Stores the timestamps emitted for each event type (in nanoseconds).
|
|
// Emitted into the trace and cleared when the consumer next calls
|
|
// ReadBuffers.
|
|
base::CircularQueue<int64_t> timestamps;
|
|
};
|
|
std::vector<LifecycleEvent> lifecycle_events;
|
|
|
|
using ClockSnapshotData =
|
|
std::vector<std::pair<uint32_t /*clock_id*/, uint64_t /*ts*/>>;
|
|
|
|
// Initial clock snapshot, captured at trace start time (when state goes to
|
|
// TracingSession::STARTED). Emitted into the trace when the consumer first
|
|
// calls ReadBuffers().
|
|
ClockSnapshotData initial_clock_snapshot;
|
|
|
|
// Stores clock snapshots to emit into the trace as a ring buffer. This
|
|
// buffer is populated both periodically and when lifecycle events happen
|
|
// but only when significant clock drift is detected. Emitted into the trace
|
|
// and cleared when the consumer next calls ReadBuffers().
|
|
base::CircularQueue<ClockSnapshotData> clock_snapshot_ring_buffer;
|
|
|
|
State state = DISABLED;
|
|
|
|
// If the consumer detached the session, this variable defines the key used
|
|
// for identifying the session later when reattaching.
|
|
std::string detach_key;
|
|
|
|
    // This is set when the Consumer sets |write_into_file| == true in the
    // TraceConfig. In this case this represents the file we should stream the
|
|
// trace packets into, rather than returning it to the consumer via
|
|
// OnTraceData().
|
|
base::ScopedFile write_into_file;
|
|
uint32_t write_period_ms = 0;
|
|
uint64_t max_file_size_bytes = 0;
|
|
uint64_t bytes_written_into_file = 0;
|
|
};
|
|
|
|
TracingServiceImpl(const TracingServiceImpl&) = delete;
|
|
TracingServiceImpl& operator=(const TracingServiceImpl&) = delete;
|
|
|
|
DataSourceInstance* SetupDataSource(const TraceConfig::DataSource&,
|
|
const TraceConfig::ProducerConfig&,
|
|
const RegisteredDataSource&,
|
|
TracingSession*);
|
|
|
|
// Returns the next available ProducerID that is not in |producers_|.
|
|
ProducerID GetNextProducerID();
|
|
|
|
  // Returns a pointer to the |tracing_sessions_| entry or nullptr if the
  // session doesn't exist.
|
|
TracingSession* GetTracingSession(TracingSessionID);
|
|
|
|
// Returns a pointer to the |tracing_sessions_| entry, matching the given
|
|
// uid and detach key, or nullptr if no such session exists.
|
|
TracingSession* GetDetachedSession(uid_t, const std::string& key);
|
|
|
|
// Update the memory guard rail by using the latest information from the
|
|
// shared memory and trace buffers.
|
|
void UpdateMemoryGuardrail();
|
|
|
|
void StartDataSourceInstance(ProducerEndpointImpl* producer,
|
|
TracingSession* tracing_session,
|
|
DataSourceInstance* instance);
|
|
void StopDataSourceInstance(ProducerEndpointImpl* producer,
|
|
TracingSession* tracing_session,
|
|
DataSourceInstance* instance,
|
|
bool disable_immediately);
|
|
void PeriodicSnapshotTask(TracingSession* tracing_session);
|
|
void MaybeSnapshotClocksIntoRingBuffer(TracingSession*);
|
|
bool SnapshotClocks(TracingSession::ClockSnapshotData*);
|
|
void SnapshotLifecyleEvent(TracingSession*,
|
|
uint32_t field_id,
|
|
bool snapshot_clocks);
|
|
void EmitClockSnapshot(TracingSession* tracing_session,
|
|
TracingSession::ClockSnapshotData,
|
|
std::vector<TracePacket>*);
|
|
void EmitSyncMarker(std::vector<TracePacket>*);
|
|
void EmitStats(TracingSession*, std::vector<TracePacket>*);
|
|
TraceStats GetTraceStats(TracingSession* tracing_session);
|
|
void EmitLifecycleEvents(TracingSession*, std::vector<TracePacket>* packets);
|
|
void MaybeEmitTraceConfig(TracingSession*, std::vector<TracePacket>*);
|
|
void MaybeEmitSystemInfo(TracingSession*, std::vector<TracePacket>*);
|
|
void MaybeEmitReceivedTriggers(TracingSession*, std::vector<TracePacket>*);
|
|
void MaybeNotifyAllDataSourcesStarted(TracingSession*);
|
|
void OnFlushTimeout(TracingSessionID, FlushRequestID);
|
|
void OnDisableTracingTimeout(TracingSessionID);
|
|
void DisableTracingNotifyConsumerAndFlushFile(TracingSession*);
|
|
void PeriodicFlushTask(TracingSessionID, bool post_next_only);
|
|
void CompleteFlush(TracingSessionID tsid,
|
|
ConsumerEndpoint::FlushCallback callback,
|
|
bool success);
|
|
void ScrapeSharedMemoryBuffers(TracingSession* tracing_session,
|
|
ProducerEndpointImpl* producer);
|
|
void PeriodicClearIncrementalStateTask(TracingSessionID, bool post_next_only);
|
|
TraceBuffer* GetBufferByID(BufferID);
|
|
void OnStartTriggersTimeout(TracingSessionID tsid);
|
|
|
|
base::TaskRunner* const task_runner_;
|
|
std::unique_ptr<SharedMemory::Factory> shm_factory_;
|
|
ProducerID last_producer_id_ = 0;
|
|
DataSourceInstanceID last_data_source_instance_id_ = 0;
|
|
TracingSessionID last_tracing_session_id_ = 0;
|
|
FlushRequestID last_flush_request_id_ = 0;
|
|
uid_t uid_ = 0;
|
|
|
|
// Buffer IDs are global across all consumers (because a Producer can produce
|
|
// data for more than one trace session, hence more than one consumer).
|
|
IdAllocator<BufferID> buffer_ids_;
|
|
|
|
std::multimap<std::string /*name*/, RegisteredDataSource> data_sources_;
|
|
std::map<ProducerID, ProducerEndpointImpl*> producers_;
|
|
std::set<ConsumerEndpointImpl*> consumers_;
|
|
std::map<TracingSessionID, TracingSession> tracing_sessions_;
|
|
std::map<BufferID, std::unique_ptr<TraceBuffer>> buffers_;
|
|
std::map<std::string, int64_t> session_to_last_trace_s_;
|
|
|
|
bool smb_scraping_enabled_ = false;
|
|
bool lockdown_mode_ = false;
|
|
uint32_t min_write_period_ms_ = 100; // Overridable for testing.
|
|
|
|
uint8_t sync_marker_packet_[32]; // Lazily initialized.
|
|
size_t sync_marker_packet_size_ = 0;
|
|
|
|
// Stats.
|
|
uint64_t chunks_discarded_ = 0;
|
|
uint64_t patches_discarded_ = 0;
|
|
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
|
|
base::WeakPtrFactory<TracingServiceImpl>
|
|
weak_ptr_factory_; // Keep at the end.
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_TRACING_SERVICE_IMPL_H_
|
|
// gen_amalgamated begin header: include/perfetto/tracing/core/tracing_service_capabilities.h
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_CAPABILITIES_H_
|
|
#define INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_CAPABILITIES_H_
|
|
|
|
// Creates the aliases in the ::perfetto namespace, doing things like:
|
|
// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
|
|
// See comments in forward_decls.h for the historical reasons of this
|
|
// indirection layer.
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
|
|
|
|
#endif // INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_CAPABILITIES_H_
|
|
// gen_amalgamated begin header: include/perfetto/tracing/core/tracing_service_state.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
|
|
#ifndef INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_STATE_H_
|
|
#define INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_STATE_H_
|
|
|
|
// Creates the aliases in the ::perfetto namespace, doing things like:
|
|
// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
|
|
// See comments in forward_decls.h for the historical reasons of this
|
|
// indirection layer.
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
|
|
|
|
#endif // INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_STATE_H_
|
|
// gen_amalgamated begin header: gen/protos/perfetto/common/trace_stats.pbzero.h
|
|
// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
|
|
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace pbzero {
|
|
|
|
class TraceStats_BufferStats;
|
|
|
|
class TraceStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
|
|
public:
|
|
TraceStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_buffer_stats() const { return at<1>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstBytes> buffer_stats() const { return GetRepeated<::protozero::ConstBytes>(1); }
|
|
bool has_producers_connected() const { return at<2>().valid(); }
|
|
uint32_t producers_connected() const { return at<2>().as_uint32(); }
|
|
bool has_producers_seen() const { return at<3>().valid(); }
|
|
uint64_t producers_seen() const { return at<3>().as_uint64(); }
|
|
bool has_data_sources_registered() const { return at<4>().valid(); }
|
|
uint32_t data_sources_registered() const { return at<4>().as_uint32(); }
|
|
bool has_data_sources_seen() const { return at<5>().valid(); }
|
|
uint64_t data_sources_seen() const { return at<5>().as_uint64(); }
|
|
bool has_tracing_sessions() const { return at<6>().valid(); }
|
|
uint32_t tracing_sessions() const { return at<6>().as_uint32(); }
|
|
bool has_total_buffers() const { return at<7>().valid(); }
|
|
uint32_t total_buffers() const { return at<7>().as_uint32(); }
|
|
bool has_chunks_discarded() const { return at<8>().valid(); }
|
|
uint64_t chunks_discarded() const { return at<8>().as_uint64(); }
|
|
bool has_patches_discarded() const { return at<9>().valid(); }
|
|
uint64_t patches_discarded() const { return at<9>().as_uint64(); }
|
|
bool has_invalid_packets() const { return at<10>().valid(); }
|
|
uint64_t invalid_packets() const { return at<10>().as_uint64(); }
|
|
};
|
|
|
|
class TraceStats : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceStats_Decoder;
|
|
enum : int32_t {
|
|
kBufferStatsFieldNumber = 1,
|
|
kProducersConnectedFieldNumber = 2,
|
|
kProducersSeenFieldNumber = 3,
|
|
kDataSourcesRegisteredFieldNumber = 4,
|
|
kDataSourcesSeenFieldNumber = 5,
|
|
kTracingSessionsFieldNumber = 6,
|
|
kTotalBuffersFieldNumber = 7,
|
|
kChunksDiscardedFieldNumber = 8,
|
|
kPatchesDiscardedFieldNumber = 9,
|
|
kInvalidPacketsFieldNumber = 10,
|
|
};
|
|
using BufferStats = ::perfetto::protos::pbzero::TraceStats_BufferStats;
|
|
template <typename T = TraceStats_BufferStats> T* add_buffer_stats() {
|
|
return BeginNestedMessage<T>(1);
|
|
}
|
|
|
|
void set_producers_connected(uint32_t value) {
|
|
AppendVarInt(2, value);
|
|
}
|
|
void set_producers_seen(uint64_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
void set_data_sources_registered(uint32_t value) {
|
|
AppendVarInt(4, value);
|
|
}
|
|
void set_data_sources_seen(uint64_t value) {
|
|
AppendVarInt(5, value);
|
|
}
|
|
void set_tracing_sessions(uint32_t value) {
|
|
AppendVarInt(6, value);
|
|
}
|
|
void set_total_buffers(uint32_t value) {
|
|
AppendVarInt(7, value);
|
|
}
|
|
void set_chunks_discarded(uint64_t value) {
|
|
AppendVarInt(8, value);
|
|
}
|
|
void set_patches_discarded(uint64_t value) {
|
|
AppendVarInt(9, value);
|
|
}
|
|
void set_invalid_packets(uint64_t value) {
|
|
AppendVarInt(10, value);
|
|
}
|
|
};
|
|
|
|
class TraceStats_BufferStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/19, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
TraceStats_BufferStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceStats_BufferStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceStats_BufferStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_buffer_size() const { return at<12>().valid(); }
|
|
uint64_t buffer_size() const { return at<12>().as_uint64(); }
|
|
bool has_bytes_written() const { return at<1>().valid(); }
|
|
uint64_t bytes_written() const { return at<1>().as_uint64(); }
|
|
bool has_bytes_overwritten() const { return at<13>().valid(); }
|
|
uint64_t bytes_overwritten() const { return at<13>().as_uint64(); }
|
|
bool has_bytes_read() const { return at<14>().valid(); }
|
|
uint64_t bytes_read() const { return at<14>().as_uint64(); }
|
|
bool has_padding_bytes_written() const { return at<15>().valid(); }
|
|
uint64_t padding_bytes_written() const { return at<15>().as_uint64(); }
|
|
bool has_padding_bytes_cleared() const { return at<16>().valid(); }
|
|
uint64_t padding_bytes_cleared() const { return at<16>().as_uint64(); }
|
|
bool has_chunks_written() const { return at<2>().valid(); }
|
|
uint64_t chunks_written() const { return at<2>().as_uint64(); }
|
|
bool has_chunks_rewritten() const { return at<10>().valid(); }
|
|
uint64_t chunks_rewritten() const { return at<10>().as_uint64(); }
|
|
bool has_chunks_overwritten() const { return at<3>().valid(); }
|
|
uint64_t chunks_overwritten() const { return at<3>().as_uint64(); }
|
|
bool has_chunks_discarded() const { return at<18>().valid(); }
|
|
uint64_t chunks_discarded() const { return at<18>().as_uint64(); }
|
|
bool has_chunks_read() const { return at<17>().valid(); }
|
|
uint64_t chunks_read() const { return at<17>().as_uint64(); }
|
|
bool has_chunks_committed_out_of_order() const { return at<11>().valid(); }
|
|
uint64_t chunks_committed_out_of_order() const { return at<11>().as_uint64(); }
|
|
bool has_write_wrap_count() const { return at<4>().valid(); }
|
|
uint64_t write_wrap_count() const { return at<4>().as_uint64(); }
|
|
bool has_patches_succeeded() const { return at<5>().valid(); }
|
|
uint64_t patches_succeeded() const { return at<5>().as_uint64(); }
|
|
bool has_patches_failed() const { return at<6>().valid(); }
|
|
uint64_t patches_failed() const { return at<6>().as_uint64(); }
|
|
bool has_readaheads_succeeded() const { return at<7>().valid(); }
|
|
uint64_t readaheads_succeeded() const { return at<7>().as_uint64(); }
|
|
bool has_readaheads_failed() const { return at<8>().valid(); }
|
|
uint64_t readaheads_failed() const { return at<8>().as_uint64(); }
|
|
bool has_abi_violations() const { return at<9>().valid(); }
|
|
uint64_t abi_violations() const { return at<9>().as_uint64(); }
|
|
bool has_trace_writer_packet_loss() const { return at<19>().valid(); }
|
|
uint64_t trace_writer_packet_loss() const { return at<19>().as_uint64(); }
|
|
};
|
|
|
|
class TraceStats_BufferStats : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceStats_BufferStats_Decoder;
|
|
enum : int32_t {
|
|
kBufferSizeFieldNumber = 12,
|
|
kBytesWrittenFieldNumber = 1,
|
|
kBytesOverwrittenFieldNumber = 13,
|
|
kBytesReadFieldNumber = 14,
|
|
kPaddingBytesWrittenFieldNumber = 15,
|
|
kPaddingBytesClearedFieldNumber = 16,
|
|
kChunksWrittenFieldNumber = 2,
|
|
kChunksRewrittenFieldNumber = 10,
|
|
kChunksOverwrittenFieldNumber = 3,
|
|
kChunksDiscardedFieldNumber = 18,
|
|
kChunksReadFieldNumber = 17,
|
|
kChunksCommittedOutOfOrderFieldNumber = 11,
|
|
kWriteWrapCountFieldNumber = 4,
|
|
kPatchesSucceededFieldNumber = 5,
|
|
kPatchesFailedFieldNumber = 6,
|
|
kReadaheadsSucceededFieldNumber = 7,
|
|
kReadaheadsFailedFieldNumber = 8,
|
|
kAbiViolationsFieldNumber = 9,
|
|
kTraceWriterPacketLossFieldNumber = 19,
|
|
};
|
|
void set_buffer_size(uint64_t value) {
|
|
AppendVarInt(12, value);
|
|
}
|
|
void set_bytes_written(uint64_t value) {
|
|
AppendVarInt(1, value);
|
|
}
|
|
void set_bytes_overwritten(uint64_t value) {
|
|
AppendVarInt(13, value);
|
|
}
|
|
void set_bytes_read(uint64_t value) {
|
|
AppendVarInt(14, value);
|
|
}
|
|
void set_padding_bytes_written(uint64_t value) {
|
|
AppendVarInt(15, value);
|
|
}
|
|
void set_padding_bytes_cleared(uint64_t value) {
|
|
AppendVarInt(16, value);
|
|
}
|
|
void set_chunks_written(uint64_t value) {
|
|
AppendVarInt(2, value);
|
|
}
|
|
void set_chunks_rewritten(uint64_t value) {
|
|
AppendVarInt(10, value);
|
|
}
|
|
void set_chunks_overwritten(uint64_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
void set_chunks_discarded(uint64_t value) {
|
|
AppendVarInt(18, value);
|
|
}
|
|
void set_chunks_read(uint64_t value) {
|
|
AppendVarInt(17, value);
|
|
}
|
|
void set_chunks_committed_out_of_order(uint64_t value) {
|
|
AppendVarInt(11, value);
|
|
}
|
|
void set_write_wrap_count(uint64_t value) {
|
|
AppendVarInt(4, value);
|
|
}
|
|
void set_patches_succeeded(uint64_t value) {
|
|
AppendVarInt(5, value);
|
|
}
|
|
void set_patches_failed(uint64_t value) {
|
|
AppendVarInt(6, value);
|
|
}
|
|
void set_readaheads_succeeded(uint64_t value) {
|
|
AppendVarInt(7, value);
|
|
}
|
|
void set_readaheads_failed(uint64_t value) {
|
|
AppendVarInt(8, value);
|
|
}
|
|
void set_abi_violations(uint64_t value) {
|
|
AppendVarInt(9, value);
|
|
}
|
|
void set_trace_writer_packet_loss(uint64_t value) {
|
|
AppendVarInt(19, value);
|
|
}
|
|
};
|
|
|
|
} // Namespace.
|
|
} // Namespace.
|
|
} // Namespace.
|
|
#endif // Include guard.
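// Usage sketch (illustrative only, not part of the generated header): the
// TraceStats message above is written through the ProtoZero setters and read
// back through its nested Decoder. Assuming the protozero::HeapBuffered
// helper from scattered_heap_buffer.h (included elsewhere in this
// amalgamation), a round trip could look like:
//
//   protozero::HeapBuffered<perfetto::protos::pbzero::TraceStats> stats;
//   stats->set_producers_connected(3);
//   stats->add_buffer_stats()->set_bytes_written(4096);
//   std::vector<uint8_t> bytes = stats.SerializeAsArray();
//
//   perfetto::protos::pbzero::TraceStats::Decoder dec(bytes.data(),
//                                                     bytes.size());
//   uint32_t producers =
//       dec.has_producers_connected() ? dec.producers_connected() : 0;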
|
|
// gen_amalgamated begin header: gen/protos/perfetto/config/trace_config.pbzero.h
|
|
// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
|
|
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace pbzero {
|
|
|
|
class DataSourceConfig;
|
|
class TraceConfig_BufferConfig;
|
|
class TraceConfig_BuiltinDataSource;
|
|
class TraceConfig_DataSource;
|
|
class TraceConfig_GuardrailOverrides;
|
|
class TraceConfig_IncidentReportConfig;
|
|
class TraceConfig_IncrementalStateConfig;
|
|
class TraceConfig_ProducerConfig;
|
|
class TraceConfig_StatsdMetadata;
|
|
class TraceConfig_TriggerConfig;
|
|
class TraceConfig_TriggerConfig_Trigger;
|
|
enum BuiltinClock : int32_t;
|
|
enum TraceConfig_BufferConfig_FillPolicy : int32_t;
|
|
enum TraceConfig_CompressionType : int32_t;
|
|
enum TraceConfig_LockdownModeOperation : int32_t;
|
|
enum TraceConfig_TriggerConfig_TriggerMode : int32_t;
|
|
|
|
enum TraceConfig_LockdownModeOperation : int32_t {
|
|
TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED = 0,
|
|
TraceConfig_LockdownModeOperation_LOCKDOWN_CLEAR = 1,
|
|
TraceConfig_LockdownModeOperation_LOCKDOWN_SET = 2,
|
|
};
|
|
|
|
const TraceConfig_LockdownModeOperation TraceConfig_LockdownModeOperation_MIN = TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED;
|
|
const TraceConfig_LockdownModeOperation TraceConfig_LockdownModeOperation_MAX = TraceConfig_LockdownModeOperation_LOCKDOWN_SET;
|
|
|
|
enum TraceConfig_CompressionType : int32_t {
|
|
TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED = 0,
|
|
TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE = 1,
|
|
};
|
|
|
|
const TraceConfig_CompressionType TraceConfig_CompressionType_MIN = TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED;
|
|
const TraceConfig_CompressionType TraceConfig_CompressionType_MAX = TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE;
|
|
|
|
enum TraceConfig_TriggerConfig_TriggerMode : int32_t {
|
|
TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED = 0,
|
|
TraceConfig_TriggerConfig_TriggerMode_START_TRACING = 1,
|
|
TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING = 2,
|
|
};
|
|
|
|
const TraceConfig_TriggerConfig_TriggerMode TraceConfig_TriggerConfig_TriggerMode_MIN = TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED;
|
|
const TraceConfig_TriggerConfig_TriggerMode TraceConfig_TriggerConfig_TriggerMode_MAX = TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING;
|
|
|
|
enum TraceConfig_BufferConfig_FillPolicy : int32_t {
|
|
TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED = 0,
|
|
TraceConfig_BufferConfig_FillPolicy_RING_BUFFER = 1,
|
|
TraceConfig_BufferConfig_FillPolicy_DISCARD = 2,
|
|
};
|
|
|
|
const TraceConfig_BufferConfig_FillPolicy TraceConfig_BufferConfig_FillPolicy_MIN = TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED;
|
|
const TraceConfig_BufferConfig_FillPolicy TraceConfig_BufferConfig_FillPolicy_MAX = TraceConfig_BufferConfig_FillPolicy_DISCARD;
|
|
|
|
class TraceConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/29, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
|
|
public:
|
|
TraceConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_buffers() const { return at<1>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstBytes> buffers() const { return GetRepeated<::protozero::ConstBytes>(1); }
|
|
bool has_data_sources() const { return at<2>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstBytes> data_sources() const { return GetRepeated<::protozero::ConstBytes>(2); }
|
|
bool has_builtin_data_sources() const { return at<20>().valid(); }
|
|
::protozero::ConstBytes builtin_data_sources() const { return at<20>().as_bytes(); }
|
|
bool has_duration_ms() const { return at<3>().valid(); }
|
|
uint32_t duration_ms() const { return at<3>().as_uint32(); }
|
|
bool has_enable_extra_guardrails() const { return at<4>().valid(); }
|
|
bool enable_extra_guardrails() const { return at<4>().as_bool(); }
|
|
bool has_lockdown_mode() const { return at<5>().valid(); }
|
|
int32_t lockdown_mode() const { return at<5>().as_int32(); }
|
|
bool has_producers() const { return at<6>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstBytes> producers() const { return GetRepeated<::protozero::ConstBytes>(6); }
|
|
bool has_statsd_metadata() const { return at<7>().valid(); }
|
|
::protozero::ConstBytes statsd_metadata() const { return at<7>().as_bytes(); }
|
|
bool has_write_into_file() const { return at<8>().valid(); }
|
|
bool write_into_file() const { return at<8>().as_bool(); }
|
|
bool has_output_path() const { return at<29>().valid(); }
|
|
::protozero::ConstChars output_path() const { return at<29>().as_string(); }
|
|
bool has_file_write_period_ms() const { return at<9>().valid(); }
|
|
uint32_t file_write_period_ms() const { return at<9>().as_uint32(); }
|
|
bool has_max_file_size_bytes() const { return at<10>().valid(); }
|
|
uint64_t max_file_size_bytes() const { return at<10>().as_uint64(); }
|
|
bool has_guardrail_overrides() const { return at<11>().valid(); }
|
|
::protozero::ConstBytes guardrail_overrides() const { return at<11>().as_bytes(); }
|
|
bool has_deferred_start() const { return at<12>().valid(); }
|
|
bool deferred_start() const { return at<12>().as_bool(); }
|
|
bool has_flush_period_ms() const { return at<13>().valid(); }
|
|
uint32_t flush_period_ms() const { return at<13>().as_uint32(); }
|
|
bool has_flush_timeout_ms() const { return at<14>().valid(); }
|
|
uint32_t flush_timeout_ms() const { return at<14>().as_uint32(); }
|
|
bool has_data_source_stop_timeout_ms() const { return at<23>().valid(); }
|
|
uint32_t data_source_stop_timeout_ms() const { return at<23>().as_uint32(); }
|
|
bool has_notify_traceur() const { return at<16>().valid(); }
|
|
bool notify_traceur() const { return at<16>().as_bool(); }
|
|
bool has_trigger_config() const { return at<17>().valid(); }
|
|
::protozero::ConstBytes trigger_config() const { return at<17>().as_bytes(); }
|
|
bool has_activate_triggers() const { return at<18>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstChars> activate_triggers() const { return GetRepeated<::protozero::ConstChars>(18); }
|
|
bool has_incremental_state_config() const { return at<21>().valid(); }
|
|
::protozero::ConstBytes incremental_state_config() const { return at<21>().as_bytes(); }
|
|
bool has_allow_user_build_tracing() const { return at<19>().valid(); }
|
|
bool allow_user_build_tracing() const { return at<19>().as_bool(); }
|
|
bool has_unique_session_name() const { return at<22>().valid(); }
|
|
::protozero::ConstChars unique_session_name() const { return at<22>().as_string(); }
|
|
bool has_compression_type() const { return at<24>().valid(); }
|
|
int32_t compression_type() const { return at<24>().as_int32(); }
|
|
bool has_incident_report_config() const { return at<25>().valid(); }
|
|
::protozero::ConstBytes incident_report_config() const { return at<25>().as_bytes(); }
|
|
bool has_trace_uuid_msb() const { return at<27>().valid(); }
|
|
int64_t trace_uuid_msb() const { return at<27>().as_int64(); }
|
|
bool has_trace_uuid_lsb() const { return at<28>().valid(); }
|
|
int64_t trace_uuid_lsb() const { return at<28>().as_int64(); }
|
|
};
|
|
|
|
class TraceConfig : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceConfig_Decoder;
|
|
enum : int32_t {
|
|
kBuffersFieldNumber = 1,
|
|
kDataSourcesFieldNumber = 2,
|
|
kBuiltinDataSourcesFieldNumber = 20,
|
|
kDurationMsFieldNumber = 3,
|
|
kEnableExtraGuardrailsFieldNumber = 4,
|
|
kLockdownModeFieldNumber = 5,
|
|
kProducersFieldNumber = 6,
|
|
kStatsdMetadataFieldNumber = 7,
|
|
kWriteIntoFileFieldNumber = 8,
|
|
kOutputPathFieldNumber = 29,
|
|
kFileWritePeriodMsFieldNumber = 9,
|
|
kMaxFileSizeBytesFieldNumber = 10,
|
|
kGuardrailOverridesFieldNumber = 11,
|
|
kDeferredStartFieldNumber = 12,
|
|
kFlushPeriodMsFieldNumber = 13,
|
|
kFlushTimeoutMsFieldNumber = 14,
|
|
kDataSourceStopTimeoutMsFieldNumber = 23,
|
|
kNotifyTraceurFieldNumber = 16,
|
|
kTriggerConfigFieldNumber = 17,
|
|
kActivateTriggersFieldNumber = 18,
|
|
kIncrementalStateConfigFieldNumber = 21,
|
|
kAllowUserBuildTracingFieldNumber = 19,
|
|
kUniqueSessionNameFieldNumber = 22,
|
|
kCompressionTypeFieldNumber = 24,
|
|
kIncidentReportConfigFieldNumber = 25,
|
|
kTraceUuidMsbFieldNumber = 27,
|
|
kTraceUuidLsbFieldNumber = 28,
|
|
};
|
|
using BufferConfig = ::perfetto::protos::pbzero::TraceConfig_BufferConfig;
|
|
using DataSource = ::perfetto::protos::pbzero::TraceConfig_DataSource;
|
|
using BuiltinDataSource = ::perfetto::protos::pbzero::TraceConfig_BuiltinDataSource;
|
|
using ProducerConfig = ::perfetto::protos::pbzero::TraceConfig_ProducerConfig;
|
|
using StatsdMetadata = ::perfetto::protos::pbzero::TraceConfig_StatsdMetadata;
|
|
using GuardrailOverrides = ::perfetto::protos::pbzero::TraceConfig_GuardrailOverrides;
|
|
using TriggerConfig = ::perfetto::protos::pbzero::TraceConfig_TriggerConfig;
|
|
using IncrementalStateConfig = ::perfetto::protos::pbzero::TraceConfig_IncrementalStateConfig;
|
|
using IncidentReportConfig = ::perfetto::protos::pbzero::TraceConfig_IncidentReportConfig;
|
|
using LockdownModeOperation = ::perfetto::protos::pbzero::TraceConfig_LockdownModeOperation;
|
|
using CompressionType = ::perfetto::protos::pbzero::TraceConfig_CompressionType;
|
|
static const LockdownModeOperation LOCKDOWN_UNCHANGED = TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED;
|
|
static const LockdownModeOperation LOCKDOWN_CLEAR = TraceConfig_LockdownModeOperation_LOCKDOWN_CLEAR;
|
|
static const LockdownModeOperation LOCKDOWN_SET = TraceConfig_LockdownModeOperation_LOCKDOWN_SET;
|
|
static const CompressionType COMPRESSION_TYPE_UNSPECIFIED = TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED;
|
|
static const CompressionType COMPRESSION_TYPE_DEFLATE = TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE;
|
|
template <typename T = TraceConfig_BufferConfig> T* add_buffers() {
|
|
return BeginNestedMessage<T>(1);
|
|
}
|
|
|
|
template <typename T = TraceConfig_DataSource> T* add_data_sources() {
|
|
return BeginNestedMessage<T>(2);
|
|
}
|
|
|
|
template <typename T = TraceConfig_BuiltinDataSource> T* set_builtin_data_sources() {
|
|
return BeginNestedMessage<T>(20);
|
|
}
|
|
|
|
void set_duration_ms(uint32_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
void set_enable_extra_guardrails(bool value) {
|
|
AppendTinyVarInt(4, value);
|
|
}
|
|
void set_lockdown_mode(::perfetto::protos::pbzero::TraceConfig_LockdownModeOperation value) {
|
|
AppendTinyVarInt(5, value);
|
|
}
|
|
template <typename T = TraceConfig_ProducerConfig> T* add_producers() {
|
|
return BeginNestedMessage<T>(6);
|
|
}
|
|
|
|
template <typename T = TraceConfig_StatsdMetadata> T* set_statsd_metadata() {
|
|
return BeginNestedMessage<T>(7);
|
|
}
|
|
|
|
void set_write_into_file(bool value) {
|
|
AppendTinyVarInt(8, value);
|
|
}
|
|
void set_output_path(const std::string& value) {
|
|
AppendBytes(29, value.data(), value.size());
|
|
}
|
|
void set_output_path(const char* data, size_t size) {
|
|
AppendBytes(29, data, size);
|
|
}
|
|
void set_file_write_period_ms(uint32_t value) {
|
|
AppendVarInt(9, value);
|
|
}
|
|
void set_max_file_size_bytes(uint64_t value) {
|
|
AppendVarInt(10, value);
|
|
}
|
|
template <typename T = TraceConfig_GuardrailOverrides> T* set_guardrail_overrides() {
|
|
return BeginNestedMessage<T>(11);
|
|
}
|
|
|
|
void set_deferred_start(bool value) {
|
|
AppendTinyVarInt(12, value);
|
|
}
|
|
void set_flush_period_ms(uint32_t value) {
|
|
AppendVarInt(13, value);
|
|
}
|
|
void set_flush_timeout_ms(uint32_t value) {
|
|
AppendVarInt(14, value);
|
|
}
|
|
void set_data_source_stop_timeout_ms(uint32_t value) {
|
|
AppendVarInt(23, value);
|
|
}
|
|
void set_notify_traceur(bool value) {
|
|
AppendTinyVarInt(16, value);
|
|
}
|
|
template <typename T = TraceConfig_TriggerConfig> T* set_trigger_config() {
|
|
return BeginNestedMessage<T>(17);
|
|
}
|
|
|
|
void add_activate_triggers(const std::string& value) {
|
|
AppendBytes(18, value.data(), value.size());
|
|
}
|
|
void add_activate_triggers(const char* data, size_t size) {
|
|
AppendBytes(18, data, size);
|
|
}
|
|
template <typename T = TraceConfig_IncrementalStateConfig> T* set_incremental_state_config() {
|
|
return BeginNestedMessage<T>(21);
|
|
}
|
|
|
|
void set_allow_user_build_tracing(bool value) {
|
|
AppendTinyVarInt(19, value);
|
|
}
|
|
void set_unique_session_name(const std::string& value) {
|
|
AppendBytes(22, value.data(), value.size());
|
|
}
|
|
void set_unique_session_name(const char* data, size_t size) {
|
|
AppendBytes(22, data, size);
|
|
}
|
|
void set_compression_type(::perfetto::protos::pbzero::TraceConfig_CompressionType value) {
|
|
AppendTinyVarInt(24, value);
|
|
}
|
|
template <typename T = TraceConfig_IncidentReportConfig> T* set_incident_report_config() {
|
|
return BeginNestedMessage<T>(25);
|
|
}
|
|
|
|
void set_trace_uuid_msb(int64_t value) {
|
|
AppendVarInt(27, value);
|
|
}
|
|
void set_trace_uuid_lsb(int64_t value) {
|
|
AppendVarInt(28, value);
|
|
}
|
|
};
|
|
|
|
class TraceConfig_IncidentReportConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
TraceConfig_IncidentReportConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceConfig_IncidentReportConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceConfig_IncidentReportConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_destination_package() const { return at<1>().valid(); }
|
|
::protozero::ConstChars destination_package() const { return at<1>().as_string(); }
|
|
bool has_destination_class() const { return at<2>().valid(); }
|
|
::protozero::ConstChars destination_class() const { return at<2>().as_string(); }
|
|
bool has_privacy_level() const { return at<3>().valid(); }
|
|
int32_t privacy_level() const { return at<3>().as_int32(); }
|
|
bool has_skip_dropbox() const { return at<4>().valid(); }
|
|
bool skip_dropbox() const { return at<4>().as_bool(); }
|
|
};
|
|
|
|
class TraceConfig_IncidentReportConfig : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceConfig_IncidentReportConfig_Decoder;
|
|
enum : int32_t {
|
|
kDestinationPackageFieldNumber = 1,
|
|
kDestinationClassFieldNumber = 2,
|
|
kPrivacyLevelFieldNumber = 3,
|
|
kSkipDropboxFieldNumber = 4,
|
|
};
|
|
void set_destination_package(const std::string& value) {
|
|
AppendBytes(1, value.data(), value.size());
|
|
}
|
|
void set_destination_package(const char* data, size_t size) {
|
|
AppendBytes(1, data, size);
|
|
}
|
|
void set_destination_class(const std::string& value) {
|
|
AppendBytes(2, value.data(), value.size());
|
|
}
|
|
void set_destination_class(const char* data, size_t size) {
|
|
AppendBytes(2, data, size);
|
|
}
|
|
void set_privacy_level(int32_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
void set_skip_dropbox(bool value) {
|
|
AppendTinyVarInt(4, value);
|
|
}
|
|
};
|
|
|
|
class TraceConfig_IncrementalStateConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
TraceConfig_IncrementalStateConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceConfig_IncrementalStateConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceConfig_IncrementalStateConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_clear_period_ms() const { return at<1>().valid(); }
|
|
uint32_t clear_period_ms() const { return at<1>().as_uint32(); }
|
|
};
|
|
|
|
class TraceConfig_IncrementalStateConfig : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceConfig_IncrementalStateConfig_Decoder;
|
|
enum : int32_t {
|
|
kClearPeriodMsFieldNumber = 1,
|
|
};
|
|
void set_clear_period_ms(uint32_t value) {
|
|
AppendVarInt(1, value);
|
|
}
|
|
};
|
|
|
|
class TraceConfig_TriggerConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
|
|
public:
|
|
TraceConfig_TriggerConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceConfig_TriggerConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceConfig_TriggerConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_trigger_mode() const { return at<1>().valid(); }
|
|
int32_t trigger_mode() const { return at<1>().as_int32(); }
|
|
bool has_triggers() const { return at<2>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstBytes> triggers() const { return GetRepeated<::protozero::ConstBytes>(2); }
|
|
bool has_trigger_timeout_ms() const { return at<3>().valid(); }
|
|
uint32_t trigger_timeout_ms() const { return at<3>().as_uint32(); }
|
|
};
|
|
|
|
class TraceConfig_TriggerConfig : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceConfig_TriggerConfig_Decoder;
|
|
enum : int32_t {
|
|
kTriggerModeFieldNumber = 1,
|
|
kTriggersFieldNumber = 2,
|
|
kTriggerTimeoutMsFieldNumber = 3,
|
|
};
|
|
using Trigger = ::perfetto::protos::pbzero::TraceConfig_TriggerConfig_Trigger;
|
|
using TriggerMode = ::perfetto::protos::pbzero::TraceConfig_TriggerConfig_TriggerMode;
|
|
static const TriggerMode UNSPECIFIED = TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED;
|
|
static const TriggerMode START_TRACING = TraceConfig_TriggerConfig_TriggerMode_START_TRACING;
|
|
static const TriggerMode STOP_TRACING = TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING;
|
|
void set_trigger_mode(::perfetto::protos::pbzero::TraceConfig_TriggerConfig_TriggerMode value) {
|
|
AppendTinyVarInt(1, value);
|
|
}
|
|
template <typename T = TraceConfig_TriggerConfig_Trigger> T* add_triggers() {
|
|
return BeginNestedMessage<T>(2);
|
|
}
|
|
|
|
void set_trigger_timeout_ms(uint32_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
};
|
|
|
|
class TraceConfig_TriggerConfig_Trigger_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
TraceConfig_TriggerConfig_Trigger_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceConfig_TriggerConfig_Trigger_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceConfig_TriggerConfig_Trigger_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_name() const { return at<1>().valid(); }
|
|
::protozero::ConstChars name() const { return at<1>().as_string(); }
|
|
bool has_producer_name_regex() const { return at<2>().valid(); }
|
|
::protozero::ConstChars producer_name_regex() const { return at<2>().as_string(); }
|
|
bool has_stop_delay_ms() const { return at<3>().valid(); }
|
|
uint32_t stop_delay_ms() const { return at<3>().as_uint32(); }
|
|
};
|
|
|
|
class TraceConfig_TriggerConfig_Trigger : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceConfig_TriggerConfig_Trigger_Decoder;
|
|
enum : int32_t {
|
|
kNameFieldNumber = 1,
|
|
kProducerNameRegexFieldNumber = 2,
|
|
kStopDelayMsFieldNumber = 3,
|
|
};
|
|
void set_name(const std::string& value) {
|
|
AppendBytes(1, value.data(), value.size());
|
|
}
|
|
void set_name(const char* data, size_t size) {
|
|
AppendBytes(1, data, size);
|
|
}
|
|
void set_producer_name_regex(const std::string& value) {
|
|
AppendBytes(2, value.data(), value.size());
|
|
}
|
|
void set_producer_name_regex(const char* data, size_t size) {
|
|
AppendBytes(2, data, size);
|
|
}
|
|
void set_stop_delay_ms(uint32_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
};
|
|
|
|
class TraceConfig_GuardrailOverrides_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
TraceConfig_GuardrailOverrides_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceConfig_GuardrailOverrides_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceConfig_GuardrailOverrides_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_max_upload_per_day_bytes() const { return at<1>().valid(); }
|
|
uint64_t max_upload_per_day_bytes() const { return at<1>().as_uint64(); }
|
|
};
|
|
|
|
class TraceConfig_GuardrailOverrides : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceConfig_GuardrailOverrides_Decoder;
|
|
enum : int32_t {
|
|
kMaxUploadPerDayBytesFieldNumber = 1,
|
|
};
|
|
void set_max_upload_per_day_bytes(uint64_t value) {
|
|
AppendVarInt(1, value);
|
|
}
|
|
};
|
|
|
|
class TraceConfig_StatsdMetadata_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
TraceConfig_StatsdMetadata_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceConfig_StatsdMetadata_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceConfig_StatsdMetadata_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_triggering_alert_id() const { return at<1>().valid(); }
|
|
int64_t triggering_alert_id() const { return at<1>().as_int64(); }
|
|
bool has_triggering_config_uid() const { return at<2>().valid(); }
|
|
int32_t triggering_config_uid() const { return at<2>().as_int32(); }
|
|
bool has_triggering_config_id() const { return at<3>().valid(); }
|
|
int64_t triggering_config_id() const { return at<3>().as_int64(); }
|
|
bool has_triggering_subscription_id() const { return at<4>().valid(); }
|
|
int64_t triggering_subscription_id() const { return at<4>().as_int64(); }
|
|
};
|
|
|
|
class TraceConfig_StatsdMetadata : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceConfig_StatsdMetadata_Decoder;
|
|
enum : int32_t {
|
|
kTriggeringAlertIdFieldNumber = 1,
|
|
kTriggeringConfigUidFieldNumber = 2,
|
|
kTriggeringConfigIdFieldNumber = 3,
|
|
kTriggeringSubscriptionIdFieldNumber = 4,
|
|
};
|
|
void set_triggering_alert_id(int64_t value) {
|
|
AppendVarInt(1, value);
|
|
}
|
|
void set_triggering_config_uid(int32_t value) {
|
|
AppendVarInt(2, value);
|
|
}
|
|
void set_triggering_config_id(int64_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
void set_triggering_subscription_id(int64_t value) {
|
|
AppendVarInt(4, value);
|
|
}
|
|
};
|
|
|
|
class TraceConfig_ProducerConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
TraceConfig_ProducerConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceConfig_ProducerConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceConfig_ProducerConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_producer_name() const { return at<1>().valid(); }
|
|
::protozero::ConstChars producer_name() const { return at<1>().as_string(); }
|
|
bool has_shm_size_kb() const { return at<2>().valid(); }
|
|
uint32_t shm_size_kb() const { return at<2>().as_uint32(); }
|
|
bool has_page_size_kb() const { return at<3>().valid(); }
|
|
uint32_t page_size_kb() const { return at<3>().as_uint32(); }
|
|
};
|
|
|
|
class TraceConfig_ProducerConfig : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceConfig_ProducerConfig_Decoder;
|
|
enum : int32_t {
|
|
kProducerNameFieldNumber = 1,
|
|
kShmSizeKbFieldNumber = 2,
|
|
kPageSizeKbFieldNumber = 3,
|
|
};
|
|
void set_producer_name(const std::string& value) {
|
|
AppendBytes(1, value.data(), value.size());
|
|
}
|
|
void set_producer_name(const char* data, size_t size) {
|
|
AppendBytes(1, data, size);
|
|
}
|
|
void set_shm_size_kb(uint32_t value) {
|
|
AppendVarInt(2, value);
|
|
}
|
|
void set_page_size_kb(uint32_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
};
|
|
|
|
class TraceConfig_BuiltinDataSource_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
TraceConfig_BuiltinDataSource_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceConfig_BuiltinDataSource_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceConfig_BuiltinDataSource_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_disable_clock_snapshotting() const { return at<1>().valid(); }
|
|
bool disable_clock_snapshotting() const { return at<1>().as_bool(); }
|
|
bool has_disable_trace_config() const { return at<2>().valid(); }
|
|
bool disable_trace_config() const { return at<2>().as_bool(); }
|
|
bool has_disable_system_info() const { return at<3>().valid(); }
|
|
bool disable_system_info() const { return at<3>().as_bool(); }
|
|
bool has_disable_service_events() const { return at<4>().valid(); }
|
|
bool disable_service_events() const { return at<4>().as_bool(); }
|
|
bool has_primary_trace_clock() const { return at<5>().valid(); }
|
|
int32_t primary_trace_clock() const { return at<5>().as_int32(); }
|
|
bool has_snapshot_interval_ms() const { return at<6>().valid(); }
|
|
uint32_t snapshot_interval_ms() const { return at<6>().as_uint32(); }
|
|
};
|
|
|
|
class TraceConfig_BuiltinDataSource : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceConfig_BuiltinDataSource_Decoder;
|
|
enum : int32_t {
|
|
kDisableClockSnapshottingFieldNumber = 1,
|
|
kDisableTraceConfigFieldNumber = 2,
|
|
kDisableSystemInfoFieldNumber = 3,
|
|
kDisableServiceEventsFieldNumber = 4,
|
|
kPrimaryTraceClockFieldNumber = 5,
|
|
kSnapshotIntervalMsFieldNumber = 6,
|
|
};
|
|
void set_disable_clock_snapshotting(bool value) {
|
|
AppendTinyVarInt(1, value);
|
|
}
|
|
void set_disable_trace_config(bool value) {
|
|
AppendTinyVarInt(2, value);
|
|
}
|
|
void set_disable_system_info(bool value) {
|
|
AppendTinyVarInt(3, value);
|
|
}
|
|
void set_disable_service_events(bool value) {
|
|
AppendTinyVarInt(4, value);
|
|
}
|
|
void set_primary_trace_clock(::perfetto::protos::pbzero::BuiltinClock value) {
|
|
AppendTinyVarInt(5, value);
|
|
}
|
|
void set_snapshot_interval_ms(uint32_t value) {
|
|
AppendVarInt(6, value);
|
|
}
|
|
};
|
|
|
|
class TraceConfig_DataSource_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
|
|
public:
|
|
TraceConfig_DataSource_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceConfig_DataSource_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceConfig_DataSource_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_config() const { return at<1>().valid(); }
|
|
::protozero::ConstBytes config() const { return at<1>().as_bytes(); }
|
|
bool has_producer_name_filter() const { return at<2>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstChars> producer_name_filter() const { return GetRepeated<::protozero::ConstChars>(2); }
|
|
bool has_producer_name_regex_filter() const { return at<3>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstChars> producer_name_regex_filter() const { return GetRepeated<::protozero::ConstChars>(3); }
|
|
};
|
|
|
|
class TraceConfig_DataSource : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceConfig_DataSource_Decoder;
|
|
enum : int32_t {
|
|
kConfigFieldNumber = 1,
|
|
kProducerNameFilterFieldNumber = 2,
|
|
kProducerNameRegexFilterFieldNumber = 3,
|
|
};
|
|
template <typename T = DataSourceConfig> T* set_config() {
|
|
return BeginNestedMessage<T>(1);
|
|
}
|
|
|
|
void add_producer_name_filter(const std::string& value) {
|
|
AppendBytes(2, value.data(), value.size());
|
|
}
|
|
void add_producer_name_filter(const char* data, size_t size) {
|
|
AppendBytes(2, data, size);
|
|
}
|
|
void add_producer_name_regex_filter(const std::string& value) {
|
|
AppendBytes(3, value.data(), value.size());
|
|
}
|
|
void add_producer_name_regex_filter(const char* data, size_t size) {
|
|
AppendBytes(3, data, size);
|
|
}
|
|
};
|
|
|
|
class TraceConfig_BufferConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
TraceConfig_BufferConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit TraceConfig_BufferConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit TraceConfig_BufferConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_size_kb() const { return at<1>().valid(); }
|
|
uint32_t size_kb() const { return at<1>().as_uint32(); }
|
|
bool has_fill_policy() const { return at<4>().valid(); }
|
|
int32_t fill_policy() const { return at<4>().as_int32(); }
|
|
};
|
|
|
|
class TraceConfig_BufferConfig : public ::protozero::Message {
|
|
public:
|
|
using Decoder = TraceConfig_BufferConfig_Decoder;
|
|
enum : int32_t {
|
|
kSizeKbFieldNumber = 1,
|
|
kFillPolicyFieldNumber = 4,
|
|
};
|
|
using FillPolicy = ::perfetto::protos::pbzero::TraceConfig_BufferConfig_FillPolicy;
|
|
static const FillPolicy UNSPECIFIED = TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED;
|
|
static const FillPolicy RING_BUFFER = TraceConfig_BufferConfig_FillPolicy_RING_BUFFER;
|
|
static const FillPolicy DISCARD = TraceConfig_BufferConfig_FillPolicy_DISCARD;
|
|
void set_size_kb(uint32_t value) {
|
|
AppendVarInt(1, value);
|
|
}
|
|
void set_fill_policy(::perfetto::protos::pbzero::TraceConfig_BufferConfig_FillPolicy value) {
|
|
AppendTinyVarInt(4, value);
|
|
}
|
|
};
|
|
|
|
} // Namespace.
|
|
} // Namespace.
|
|
} // Namespace.
|
|
#endif // Include guard.
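// Usage sketch (illustrative only, not part of the generated header): a
// writer populates TraceConfig through the add_*/set_* helpers above, with
// nested messages created via BeginNestedMessage under the hood. Assuming a
// protozero::HeapBuffered wrapper, a minimal ring-buffer config could be
// built like this:
//
//   protozero::HeapBuffered<perfetto::protos::pbzero::TraceConfig> cfg;
//   auto* buf = cfg->add_buffers();
//   buf->set_size_kb(4096);
//   buf->set_fill_policy(perfetto::protos::pbzero::
//                            TraceConfig_BufferConfig_FillPolicy_RING_BUFFER);
//   cfg->set_duration_ms(10 * 1000);
//   std::string serialized = cfg.SerializeAsString();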
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/clock_snapshot.pbzero.h
|
|
// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
|
|
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CLOCK_SNAPSHOT_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CLOCK_SNAPSHOT_PROTO_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace pbzero {
|
|
|
|
class ClockSnapshot_Clock;
|
|
enum BuiltinClock : int32_t;
|
|
|
|
enum ClockSnapshot_Clock_BuiltinClocks : int32_t {
|
|
ClockSnapshot_Clock_BuiltinClocks_UNKNOWN = 0,
|
|
ClockSnapshot_Clock_BuiltinClocks_REALTIME = 1,
|
|
ClockSnapshot_Clock_BuiltinClocks_REALTIME_COARSE = 2,
|
|
ClockSnapshot_Clock_BuiltinClocks_MONOTONIC = 3,
|
|
ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_COARSE = 4,
|
|
ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_RAW = 5,
|
|
ClockSnapshot_Clock_BuiltinClocks_BOOTTIME = 6,
|
|
ClockSnapshot_Clock_BuiltinClocks_BUILTIN_CLOCK_MAX_ID = 63,
|
|
};
|
|
|
|
const ClockSnapshot_Clock_BuiltinClocks ClockSnapshot_Clock_BuiltinClocks_MIN = ClockSnapshot_Clock_BuiltinClocks_UNKNOWN;
|
|
const ClockSnapshot_Clock_BuiltinClocks ClockSnapshot_Clock_BuiltinClocks_MAX = ClockSnapshot_Clock_BuiltinClocks_BUILTIN_CLOCK_MAX_ID;
|
|
|
|
class ClockSnapshot_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
|
|
public:
|
|
ClockSnapshot_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit ClockSnapshot_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit ClockSnapshot_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_clocks() const { return at<1>().valid(); }
|
|
::protozero::RepeatedFieldIterator<::protozero::ConstBytes> clocks() const { return GetRepeated<::protozero::ConstBytes>(1); }
|
|
bool has_primary_trace_clock() const { return at<2>().valid(); }
|
|
int32_t primary_trace_clock() const { return at<2>().as_int32(); }
|
|
};
|
|
|
|
class ClockSnapshot : public ::protozero::Message {
|
|
public:
|
|
using Decoder = ClockSnapshot_Decoder;
|
|
enum : int32_t {
|
|
kClocksFieldNumber = 1,
|
|
kPrimaryTraceClockFieldNumber = 2,
|
|
};
|
|
using Clock = ::perfetto::protos::pbzero::ClockSnapshot_Clock;
|
|
template <typename T = ClockSnapshot_Clock> T* add_clocks() {
|
|
return BeginNestedMessage<T>(1);
|
|
}
|
|
|
|
void set_primary_trace_clock(::perfetto::protos::pbzero::BuiltinClock value) {
|
|
AppendTinyVarInt(2, value);
|
|
}
|
|
};
|
|
|
|
class ClockSnapshot_Clock_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
ClockSnapshot_Clock_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit ClockSnapshot_Clock_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit ClockSnapshot_Clock_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_clock_id() const { return at<1>().valid(); }
|
|
uint32_t clock_id() const { return at<1>().as_uint32(); }
|
|
bool has_timestamp() const { return at<2>().valid(); }
|
|
uint64_t timestamp() const { return at<2>().as_uint64(); }
|
|
bool has_is_incremental() const { return at<3>().valid(); }
|
|
bool is_incremental() const { return at<3>().as_bool(); }
|
|
bool has_unit_multiplier_ns() const { return at<4>().valid(); }
|
|
uint64_t unit_multiplier_ns() const { return at<4>().as_uint64(); }
|
|
};
|
|
|
|
class ClockSnapshot_Clock : public ::protozero::Message {
|
|
public:
|
|
using Decoder = ClockSnapshot_Clock_Decoder;
|
|
enum : int32_t {
|
|
kClockIdFieldNumber = 1,
|
|
kTimestampFieldNumber = 2,
|
|
kIsIncrementalFieldNumber = 3,
|
|
kUnitMultiplierNsFieldNumber = 4,
|
|
};
|
|
using BuiltinClocks = ::perfetto::protos::pbzero::ClockSnapshot_Clock_BuiltinClocks;
|
|
static const BuiltinClocks UNKNOWN = ClockSnapshot_Clock_BuiltinClocks_UNKNOWN;
|
|
static const BuiltinClocks REALTIME = ClockSnapshot_Clock_BuiltinClocks_REALTIME;
|
|
static const BuiltinClocks REALTIME_COARSE = ClockSnapshot_Clock_BuiltinClocks_REALTIME_COARSE;
|
|
static const BuiltinClocks MONOTONIC = ClockSnapshot_Clock_BuiltinClocks_MONOTONIC;
|
|
static const BuiltinClocks MONOTONIC_COARSE = ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_COARSE;
|
|
static const BuiltinClocks MONOTONIC_RAW = ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_RAW;
|
|
static const BuiltinClocks BOOTTIME = ClockSnapshot_Clock_BuiltinClocks_BOOTTIME;
|
|
static const BuiltinClocks BUILTIN_CLOCK_MAX_ID = ClockSnapshot_Clock_BuiltinClocks_BUILTIN_CLOCK_MAX_ID;
|
|
void set_clock_id(uint32_t value) {
|
|
AppendVarInt(1, value);
|
|
}
|
|
void set_timestamp(uint64_t value) {
|
|
AppendVarInt(2, value);
|
|
}
|
|
void set_is_incremental(bool value) {
|
|
AppendTinyVarInt(3, value);
|
|
}
|
|
void set_unit_multiplier_ns(uint64_t value) {
|
|
AppendVarInt(4, value);
|
|
}
|
|
};
|
|
|
|
} // Namespace.
|
|
} // Namespace.
|
|
} // Namespace.
|
|
#endif // Include guard.
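// Usage sketch (illustrative only, not part of the generated header): a
// ClockSnapshot carries one nested Clock per clock domain, appended through
// add_clocks(). Assuming |snapshot| is a ClockSnapshot* obtained from a
// parent message and |boot_time_ns| is a uint64_t timestamp (both
// hypothetical names for this example):
//
//   auto* clock = snapshot->add_clocks();
//   clock->set_clock_id(
//       perfetto::protos::pbzero::ClockSnapshot_Clock_BuiltinClocks_BOOTTIME);
//   clock->set_timestamp(boot_time_ns);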
// gen_amalgamated begin header: gen/protos/perfetto/trace/perfetto/tracing_service_event.pbzero.h
// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.

#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_TRACING_SERVICE_EVENT_PROTO_H_
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_TRACING_SERVICE_EVENT_PROTO_H_

#include <stddef.h>
#include <stdint.h>

// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"

namespace perfetto {
namespace protos {
namespace pbzero {

class TracingServiceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
public:
TracingServiceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
explicit TracingServiceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
explicit TracingServiceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
bool has_tracing_started() const { return at<2>().valid(); }
bool tracing_started() const { return at<2>().as_bool(); }
bool has_all_data_sources_started() const { return at<1>().valid(); }
bool all_data_sources_started() const { return at<1>().as_bool(); }
bool has_all_data_sources_flushed() const { return at<3>().valid(); }
bool all_data_sources_flushed() const { return at<3>().as_bool(); }
bool has_read_tracing_buffers_completed() const { return at<4>().valid(); }
bool read_tracing_buffers_completed() const { return at<4>().as_bool(); }
bool has_tracing_disabled() const { return at<5>().valid(); }
bool tracing_disabled() const { return at<5>().as_bool(); }
};

class TracingServiceEvent : public ::protozero::Message {
public:
using Decoder = TracingServiceEvent_Decoder;
enum : int32_t {
kTracingStartedFieldNumber = 2,
kAllDataSourcesStartedFieldNumber = 1,
kAllDataSourcesFlushedFieldNumber = 3,
kReadTracingBuffersCompletedFieldNumber = 4,
kTracingDisabledFieldNumber = 5,
};
void set_tracing_started(bool value) {
AppendTinyVarInt(2, value);
}
void set_all_data_sources_started(bool value) {
AppendTinyVarInt(1, value);
}
void set_all_data_sources_flushed(bool value) {
AppendTinyVarInt(3, value);
}
void set_read_tracing_buffers_completed(bool value) {
AppendTinyVarInt(4, value);
}
void set_tracing_disabled(bool value) {
AppendTinyVarInt(5, value);
}
};

} // Namespace.
} // Namespace.
} // Namespace.
#endif // Include guard.
|
|
// gen_amalgamated begin header: gen/protos/perfetto/trace/system_info.pbzero.h
|
|
// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
|
|
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_SYSTEM_INFO_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_SYSTEM_INFO_PROTO_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace pbzero {
|
|
|
|
class Utsname;
|
|
|
|
class SystemInfo_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
SystemInfo_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit SystemInfo_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit SystemInfo_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_utsname() const { return at<1>().valid(); }
|
|
::protozero::ConstBytes utsname() const { return at<1>().as_bytes(); }
|
|
bool has_android_build_fingerprint() const { return at<2>().valid(); }
|
|
::protozero::ConstChars android_build_fingerprint() const { return at<2>().as_string(); }
|
|
bool has_hz() const { return at<3>().valid(); }
|
|
int64_t hz() const { return at<3>().as_int64(); }
|
|
};
|
|
|
|
class SystemInfo : public ::protozero::Message {
|
|
public:
|
|
using Decoder = SystemInfo_Decoder;
|
|
enum : int32_t {
|
|
kUtsnameFieldNumber = 1,
|
|
kAndroidBuildFingerprintFieldNumber = 2,
|
|
kHzFieldNumber = 3,
|
|
};
|
|
template <typename T = Utsname> T* set_utsname() {
|
|
return BeginNestedMessage<T>(1);
|
|
}
|
|
|
|
void set_android_build_fingerprint(const std::string& value) {
|
|
AppendBytes(2, value.data(), value.size());
|
|
}
|
|
void set_android_build_fingerprint(const char* data, size_t size) {
|
|
AppendBytes(2, data, size);
|
|
}
|
|
void set_hz(int64_t value) {
|
|
AppendVarInt(3, value);
|
|
}
|
|
};
|
|
|
|
class Utsname_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
|
|
public:
|
|
Utsname_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
|
|
explicit Utsname_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
|
|
explicit Utsname_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
|
|
bool has_sysname() const { return at<1>().valid(); }
|
|
::protozero::ConstChars sysname() const { return at<1>().as_string(); }
|
|
bool has_version() const { return at<2>().valid(); }
|
|
::protozero::ConstChars version() const { return at<2>().as_string(); }
|
|
bool has_release() const { return at<3>().valid(); }
|
|
::protozero::ConstChars release() const { return at<3>().as_string(); }
|
|
bool has_machine() const { return at<4>().valid(); }
|
|
::protozero::ConstChars machine() const { return at<4>().as_string(); }
|
|
};
|
|
|
|
class Utsname : public ::protozero::Message {
|
|
public:
|
|
using Decoder = Utsname_Decoder;
|
|
enum : int32_t {
|
|
kSysnameFieldNumber = 1,
|
|
kVersionFieldNumber = 2,
|
|
kReleaseFieldNumber = 3,
|
|
kMachineFieldNumber = 4,
|
|
};
|
|
void set_sysname(const std::string& value) {
|
|
AppendBytes(1, value.data(), value.size());
|
|
}
|
|
void set_sysname(const char* data, size_t size) {
|
|
AppendBytes(1, data, size);
|
|
}
|
|
void set_version(const std::string& value) {
|
|
AppendBytes(2, value.data(), value.size());
|
|
}
|
|
void set_version(const char* data, size_t size) {
|
|
AppendBytes(2, data, size);
|
|
}
|
|
void set_release(const std::string& value) {
|
|
AppendBytes(3, value.data(), value.size());
|
|
}
|
|
void set_release(const char* data, size_t size) {
|
|
AppendBytes(3, data, size);
|
|
}
|
|
void set_machine(const std::string& value) {
|
|
AppendBytes(4, value.data(), value.size());
|
|
}
|
|
void set_machine(const char* data, size_t size) {
|
|
AppendBytes(4, data, size);
|
|
}
|
|
};
|
|
|
|
} // Namespace.
|
|
} // Namespace.
|
|
} // Namespace.
|
|
#endif // Include guard.
// gen_amalgamated begin header: gen/protos/perfetto/trace/trigger.pbzero.h
// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.

#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRIGGER_PROTO_H_
#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRIGGER_PROTO_H_

#include <stddef.h>
#include <stdint.h>

// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"

namespace perfetto {
namespace protos {
namespace pbzero {

class Trigger_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
public:
Trigger_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
explicit Trigger_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
explicit Trigger_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
bool has_trigger_name() const { return at<1>().valid(); }
::protozero::ConstChars trigger_name() const { return at<1>().as_string(); }
bool has_producer_name() const { return at<2>().valid(); }
::protozero::ConstChars producer_name() const { return at<2>().as_string(); }
bool has_trusted_producer_uid() const { return at<3>().valid(); }
int32_t trusted_producer_uid() const { return at<3>().as_int32(); }
};

class Trigger : public ::protozero::Message {
public:
using Decoder = Trigger_Decoder;
enum : int32_t {
kTriggerNameFieldNumber = 1,
kProducerNameFieldNumber = 2,
kTrustedProducerUidFieldNumber = 3,
};
void set_trigger_name(const std::string& value) {
AppendBytes(1, value.data(), value.size());
}
void set_trigger_name(const char* data, size_t size) {
AppendBytes(1, data, size);
}
void set_producer_name(const std::string& value) {
AppendBytes(2, value.data(), value.size());
}
void set_producer_name(const char* data, size_t size) {
AppendBytes(2, data, size);
}
void set_trusted_producer_uid(int32_t value) {
AppendVarInt(3, value);
}
};

} // Namespace.
} // Namespace.
} // Namespace.
#endif // Include guard.
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

// gen_amalgamated expanded: #include "src/tracing/core/tracing_service_impl.h"

// gen_amalgamated expanded: #include "perfetto/base/build_config.h"

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <regex>
#include <unordered_set>

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
#include <sys/uio.h>
#include <sys/utsname.h>
#include <unistd.h>
#endif

#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
#include <sys/system_properties.h>
#endif

#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
#define PERFETTO_HAS_CHMOD
#include <sys/stat.h>
#endif

#include <algorithm>

// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog.h"
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
// gen_amalgamated expanded: #include "perfetto/protozero/static_buffer.h"
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_capabilities.h"
// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_state.h"
// gen_amalgamated expanded: #include "src/tracing/core/packet_stream_validator.h"
// gen_amalgamated expanded: #include "src/tracing/core/shared_memory_arbiter_impl.h"
// gen_amalgamated expanded: #include "src/tracing/core/trace_buffer.h"

// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.pbzero.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.pbzero.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.pbzero.h"
// gen_amalgamated expanded: #include "protos/perfetto/trace/clock_snapshot.pbzero.h"
// gen_amalgamated expanded: #include "protos/perfetto/trace/perfetto/tracing_service_event.pbzero.h"
// gen_amalgamated expanded: #include "protos/perfetto/trace/system_info.pbzero.h"
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
// gen_amalgamated expanded: #include "protos/perfetto/trace/trigger.pbzero.h"
// General note: this class must assume that Producers are malicious and will
// try to crash / exploit this class. We can trust pointers because they come
// from the IPC layer, but we should never assume that the producer calls
// come in the right order or that their arguments are sane / within bounds.
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
constexpr int kMaxBuffersPerConsumer = 128;
|
|
constexpr uint32_t kDefaultSnapshotsIntervalMs = 10 * 1000;
|
|
constexpr int kDefaultWriteIntoFilePeriodMs = 5000;
|
|
constexpr int kMaxConcurrentTracingSessions = 15;
|
|
constexpr int kMaxConcurrentTracingSessionsPerUid = 5;
|
|
constexpr int kMaxConcurrentTracingSessionsForStatsdUid = 10;
|
|
constexpr int64_t kMinSecondsBetweenTracesGuardrail = 5 * 60;
|
|
|
|
constexpr uint32_t kMillisPerHour = 3600000;
|
|
constexpr uint32_t kMaxTracingDurationMillis = 7 * 24 * kMillisPerHour;
|
|
|
|
// These apply only if enable_extra_guardrails is true.
|
|
constexpr uint32_t kGuardrailsMaxTracingBufferSizeKb = 128 * 1024;
|
|
constexpr uint32_t kGuardrailsMaxTracingDurationMillis = 24 * kMillisPerHour;
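// In other words: traces are normally capped at one week
// (kMaxTracingDurationMillis above); with enable_extra_guardrails the cap
// drops to 24 hours and the total buffer size to 128 MB.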
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
|
|
struct iovec {
|
|
void* iov_base; // Address
|
|
size_t iov_len; // Block size
|
|
};
|
|
|
|
// Simple implementation of writev. Note that this does not give the atomicity
|
|
// guarantees of a real writev, but we don't depend on these (we aren't writing
|
|
// to the same file from another thread).
|
|
ssize_t writev(int fd, const struct iovec* iov, int iovcnt) {
|
|
ssize_t total_size = 0;
|
|
for (int i = 0; i < iovcnt; ++i) {
|
|
ssize_t current_size = base::WriteAll(fd, iov[i].iov_base, iov[i].iov_len);
|
|
if (current_size != static_cast<ssize_t>(iov[i].iov_len))
|
|
return -1;
|
|
total_size += current_size;
|
|
}
|
|
return total_size;
|
|
}
|
|
|
|
#define IOV_MAX 1024 // Linux compatible limit.
|
|
|
|
// uid checking is a no-op on Windows and NaCl.
|
|
uid_t getuid() {
|
|
return 0;
|
|
}
|
|
uid_t geteuid() {
|
|
return 0;
|
|
}
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) ||
|
|
// PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
|
|
|
|
// Partially encodes a CommitDataRequest in an int32 for the purposes of
|
|
// metatracing. Note that it encodes only the bottom 10 bits of the producer id
|
|
// (which is technically 16 bits wide).
|
|
//
|
|
// Format (by bit range):
|
|
// [ 31 ][ 30 ][ 29:20 ][ 19:10 ][ 9:0]
|
|
// [unused][has flush id][num chunks to patch][num chunks to move][producer id]
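// Worked example (illustrative values): producer_id=5, 3 chunks to move,
// 2 chunks to patch and a non-zero flush_request_id encode as
//   (1u << 30) | (2u << 20) | (3u << 10) | 5u == 0x40200C05.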
|
|
static int32_t EncodeCommitDataRequest(ProducerID producer_id,
|
|
const CommitDataRequest& req_untrusted) {
|
|
uint32_t cmov = static_cast<uint32_t>(req_untrusted.chunks_to_move_size());
|
|
uint32_t cpatch = static_cast<uint32_t>(req_untrusted.chunks_to_patch_size());
|
|
uint32_t has_flush_id = req_untrusted.flush_request_id() != 0;
|
|
|
|
uint32_t mask = (1 << 10) - 1;
|
|
uint32_t acc = 0;
|
|
acc |= has_flush_id << 30;
|
|
acc |= (cpatch & mask) << 20;
|
|
acc |= (cmov & mask) << 10;
|
|
acc |= (producer_id & mask);
|
|
return static_cast<int32_t>(acc);
|
|
}
|
|
|
|
void SerializeAndAppendPacket(std::vector<TracePacket>* packets,
|
|
std::vector<uint8_t> packet) {
|
|
Slice slice = Slice::Allocate(packet.size());
|
|
memcpy(slice.own_data(), packet.data(), packet.size());
|
|
packets->emplace_back();
|
|
packets->back().AddSlice(std::move(slice));
|
|
}
|
|
|
|
std::tuple<size_t /*shm_size*/, size_t /*page_size*/> EnsureValidShmSizes(
|
|
size_t shm_size,
|
|
size_t page_size) {
|
|
// Theoretically the max page size supported by the ABI is 64KB.
|
|
// However, the current implementation of TraceBuffer (the non-shared
|
|
// userspace buffer where the service copies data) supports at most
|
|
// 32K. Setting 64K "works" from the producer<>consumer viewpoint
|
|
// but then causes the data to be discarded when copying it into
|
|
// TraceBuffer.
|
|
constexpr size_t kMaxPageSize = 32 * 1024;
|
|
static_assert(kMaxPageSize <= SharedMemoryABI::kMaxPageSize, "");
|
|
|
|
if (page_size == 0)
|
|
page_size = TracingServiceImpl::kDefaultShmPageSize;
|
|
if (shm_size == 0)
|
|
shm_size = TracingServiceImpl::kDefaultShmSize;
|
|
|
|
page_size = std::min<size_t>(page_size, kMaxPageSize);
|
|
shm_size = std::min<size_t>(shm_size, TracingServiceImpl::kMaxShmSize);
|
|
|
|
// The tracing page size has to be a multiple of 4K. On some systems (e.g. Mac
|
|
// on Arm64) the system page size can be larger (e.g., 16K). That doesn't
|
|
// matter here, because the tracing page size is just a logical partitioning
|
|
// and does not have any dependencies on kernel mm syscalls (read: it's fine
|
|
// to have trace page sizes of 4K on a system where the kernel page size is
|
|
// 16K).
|
|
bool page_size_is_valid = page_size >= SharedMemoryABI::kMinPageSize;
|
|
page_size_is_valid &= page_size % SharedMemoryABI::kMinPageSize == 0;
|
|
|
|
// Only allow power of two numbers of pages, i.e. 1, 2, 4, 8 pages.
|
|
size_t num_pages = page_size / SharedMemoryABI::kMinPageSize;
|
|
page_size_is_valid &= (num_pages & (num_pages - 1)) == 0;
|
|
|
|
if (!page_size_is_valid || shm_size < page_size ||
|
|
shm_size % page_size != 0) {
|
|
return std::make_tuple(TracingServiceImpl::kDefaultShmSize,
|
|
TracingServiceImpl::kDefaultShmPageSize);
|
|
}
|
|
return std::make_tuple(shm_size, page_size);
|
|
}
|
|
|
|
bool NameMatchesFilter(const std::string& name,
|
|
const std::vector<std::string>& name_filter,
|
|
const std::vector<std::string>& name_regex_filter) {
|
|
bool filter_is_set = !name_filter.empty() || !name_regex_filter.empty();
|
|
if (!filter_is_set)
|
|
return true;
|
|
bool filter_matches = std::find(name_filter.begin(), name_filter.end(),
|
|
name) != name_filter.end();
|
|
bool filter_regex_matches =
|
|
std::find_if(name_regex_filter.begin(), name_regex_filter.end(),
|
|
[&](const std::string& regex) {
|
|
return std::regex_match(
|
|
name, std::regex(regex, std::regex::extended));
|
|
}) != name_regex_filter.end();
|
|
return filter_matches || filter_regex_matches;
|
|
}
|
|
|
|
// Used when write_into_file == true and output_path is not empty.
|
|
base::ScopedFile CreateTraceFile(const std::string& path) {
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
|
|
static const char kBase[] = "/data/misc/perfetto-traces/";
|
|
if (!base::StartsWith(path, kBase) || path.rfind('/') != strlen(kBase) - 1) {
|
|
PERFETTO_ELOG("Invalid output_path %s. On Android it must be within %s.",
|
|
path.c_str(), kBase);
|
|
return base::ScopedFile();
|
|
}
|
|
#endif
|
|
// O_CREAT | O_EXCL will fail if the file exists already.
|
|
auto fd = base::OpenFile(path, O_RDWR | O_CREAT | O_EXCL, 0600);
|
|
if (!fd)
|
|
PERFETTO_PLOG("Failed to create %s", path.c_str());
|
|
#if defined(PERFETTO_HAS_CHMOD)
|
|
// Passing 0644 directly above won't work because of umask.
|
|
PERFETTO_CHECK(fchmod(*fd, 0644) == 0);
|
|
#endif
|
|
return fd;
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// These constants are instead defined in the header because they are used by tests.
|
|
constexpr size_t TracingServiceImpl::kDefaultShmSize;
|
|
constexpr size_t TracingServiceImpl::kDefaultShmPageSize;
|
|
|
|
constexpr size_t TracingServiceImpl::kMaxShmSize;
|
|
constexpr uint32_t TracingServiceImpl::kDataSourceStopTimeoutMs;
|
|
constexpr uint8_t TracingServiceImpl::kSyncMarker[];
|
|
|
|
// static
|
|
std::unique_ptr<TracingService> TracingService::CreateInstance(
|
|
std::unique_ptr<SharedMemory::Factory> shm_factory,
|
|
base::TaskRunner* task_runner) {
|
|
return std::unique_ptr<TracingService>(
|
|
new TracingServiceImpl(std::move(shm_factory), task_runner));
|
|
}
|
|
|
|
TracingServiceImpl::TracingServiceImpl(
|
|
std::unique_ptr<SharedMemory::Factory> shm_factory,
|
|
base::TaskRunner* task_runner)
|
|
: task_runner_(task_runner),
|
|
shm_factory_(std::move(shm_factory)),
|
|
uid_(getuid()),
|
|
buffer_ids_(kMaxTraceBufferID),
|
|
weak_ptr_factory_(this) {
|
|
PERFETTO_DCHECK(task_runner_);
|
|
}
|
|
|
|
TracingServiceImpl::~TracingServiceImpl() {
|
|
// TODO(fmayer): handle teardown of all Producer.
|
|
}
|
|
|
|
std::unique_ptr<TracingService::ProducerEndpoint>
|
|
TracingServiceImpl::ConnectProducer(Producer* producer,
|
|
uid_t uid,
|
|
const std::string& producer_name,
|
|
size_t shared_memory_size_hint_bytes,
|
|
bool in_process,
|
|
ProducerSMBScrapingMode smb_scraping_mode,
|
|
size_t shared_memory_page_size_hint_bytes,
|
|
std::unique_ptr<SharedMemory> shm) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
|
|
if (lockdown_mode_ && uid != geteuid()) {
|
|
PERFETTO_DLOG("Lockdown mode. Rejecting producer with UID %ld",
|
|
static_cast<unsigned long>(uid));
|
|
return nullptr;
|
|
}
|
|
|
|
if (producers_.size() >= kMaxProducerID) {
|
|
PERFETTO_DFATAL("Too many producers.");
|
|
return nullptr;
|
|
}
|
|
const ProducerID id = GetNextProducerID();
|
|
PERFETTO_DLOG("Producer %" PRIu16 " connected", id);
|
|
|
|
bool smb_scraping_enabled = smb_scraping_enabled_;
|
|
switch (smb_scraping_mode) {
|
|
case ProducerSMBScrapingMode::kDefault:
|
|
break;
|
|
case ProducerSMBScrapingMode::kEnabled:
|
|
smb_scraping_enabled = true;
|
|
break;
|
|
case ProducerSMBScrapingMode::kDisabled:
|
|
smb_scraping_enabled = false;
|
|
break;
|
|
}
|
|
|
|
std::unique_ptr<ProducerEndpointImpl> endpoint(new ProducerEndpointImpl(
|
|
id, uid, this, task_runner_, producer, producer_name, in_process,
|
|
smb_scraping_enabled));
|
|
auto it_and_inserted = producers_.emplace(id, endpoint.get());
|
|
PERFETTO_DCHECK(it_and_inserted.second);
|
|
endpoint->shmem_size_hint_bytes_ = shared_memory_size_hint_bytes;
|
|
endpoint->shmem_page_size_hint_bytes_ = shared_memory_page_size_hint_bytes;
|
|
|
|
// Producer::OnConnect() should run before Producer::OnTracingSetup(). The
|
|
// latter may be posted by SetupSharedMemory() below, so post OnConnect() now.
|
|
task_runner_->PostTask(std::bind(&Producer::OnConnect, endpoint->producer_));
|
|
|
|
if (shm) {
|
|
// The producer supplied an SMB. This is used only by Chrome; in the most
|
|
// common cases the SMB is created by the service and passed via
|
|
// OnTracingSetup(). Verify that it is correctly sized before we attempt to
|
|
// use it. The transport layer has to verify the integrity of the SMB (e.g.
|
|
// ensure that the producer can't resize it after the fact).
|
|
size_t shm_size, page_size;
|
|
std::tie(shm_size, page_size) =
|
|
EnsureValidShmSizes(shm->size(), endpoint->shmem_page_size_hint_bytes_);
|
|
if (shm_size == shm->size() &&
|
|
page_size == endpoint->shmem_page_size_hint_bytes_) {
|
|
PERFETTO_DLOG(
|
|
"Adopting producer-provided SMB of %zu kB for producer \"%s\"",
|
|
shm_size / 1024, endpoint->name_.c_str());
|
|
endpoint->SetupSharedMemory(std::move(shm), page_size,
|
|
/*provided_by_producer=*/true);
|
|
} else {
|
|
PERFETTO_LOG(
|
|
"Discarding incorrectly sized producer-provided SMB for producer "
|
|
"\"%s\", falling back to service-provided SMB. Requested sizes: %zu "
|
|
"B total, %zu B page size; suggested corrected sizes: %zu B total, "
|
|
"%zu B page size",
|
|
endpoint->name_.c_str(), shm->size(),
|
|
endpoint->shmem_page_size_hint_bytes_, shm_size, page_size);
|
|
shm.reset();
|
|
}
|
|
}
|
|
|
|
return std::unique_ptr<ProducerEndpoint>(std::move(endpoint));
|
|
}
|
|
|
|
void TracingServiceImpl::DisconnectProducer(ProducerID id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Producer %" PRIu16 " disconnected", id);
|
|
PERFETTO_DCHECK(producers_.count(id));
|
|
|
|
// Scrape remaining chunks for this producer to ensure we don't lose data.
|
|
if (auto* producer = GetProducer(id)) {
|
|
for (auto& session_id_and_session : tracing_sessions_)
|
|
ScrapeSharedMemoryBuffers(&session_id_and_session.second, producer);
|
|
}
|
|
|
|
for (auto it = data_sources_.begin(); it != data_sources_.end();) {
|
|
auto next = it;
|
|
next++;
|
|
if (it->second.producer_id == id)
|
|
UnregisterDataSource(id, it->second.descriptor.name());
|
|
it = next;
|
|
}
|
|
|
|
producers_.erase(id);
|
|
UpdateMemoryGuardrail();
|
|
}
|
|
|
|
TracingServiceImpl::ProducerEndpointImpl* TracingServiceImpl::GetProducer(
|
|
ProducerID id) const {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto it = producers_.find(id);
|
|
if (it == producers_.end())
|
|
return nullptr;
|
|
return it->second;
|
|
}
|
|
|
|
std::unique_ptr<TracingService::ConsumerEndpoint>
|
|
TracingServiceImpl::ConnectConsumer(Consumer* consumer, uid_t uid) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Consumer %p connected", reinterpret_cast<void*>(consumer));
|
|
std::unique_ptr<ConsumerEndpointImpl> endpoint(
|
|
new ConsumerEndpointImpl(this, task_runner_, consumer, uid));
|
|
auto it_and_inserted = consumers_.emplace(endpoint.get());
|
|
PERFETTO_DCHECK(it_and_inserted.second);
|
|
// The consumer might go away before we're able to send the connect
// notification; if that is the case, just bail out.
|
|
auto weak_ptr = endpoint->GetWeakPtr();
|
|
task_runner_->PostTask([weak_ptr] {
|
|
if (!weak_ptr) {
|
|
return;
|
|
}
|
|
weak_ptr->consumer_->OnConnect();
|
|
});
|
|
return std::unique_ptr<ConsumerEndpoint>(std::move(endpoint));
|
|
}
|
|
|
|
void TracingServiceImpl::DisconnectConsumer(ConsumerEndpointImpl* consumer) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Consumer %p disconnected", reinterpret_cast<void*>(consumer));
|
|
PERFETTO_DCHECK(consumers_.count(consumer));
|
|
|
|
// TODO(primiano): Check that this is safe (what happens if there are
|
|
// ReadBuffers() calls posted in the meantime? They need to become noop).
|
|
if (consumer->tracing_session_id_)
|
|
FreeBuffers(consumer->tracing_session_id_); // Will also DisableTracing().
|
|
consumers_.erase(consumer);
|
|
|
|
// At this point no more pointers to |consumer| should be around.
|
|
PERFETTO_DCHECK(!std::any_of(
|
|
tracing_sessions_.begin(), tracing_sessions_.end(),
|
|
[consumer](const std::pair<const TracingSessionID, TracingSession>& kv) {
|
|
return kv.second.consumer_maybe_null == consumer;
|
|
}));
|
|
}
|
|
|
|
bool TracingServiceImpl::DetachConsumer(ConsumerEndpointImpl* consumer,
|
|
const std::string& key) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Consumer %p detached", reinterpret_cast<void*>(consumer));
|
|
PERFETTO_DCHECK(consumers_.count(consumer));
|
|
|
|
TracingSessionID tsid = consumer->tracing_session_id_;
|
|
TracingSession* tracing_session;
|
|
if (!tsid || !(tracing_session = GetTracingSession(tsid)))
|
|
return false;
|
|
|
|
if (GetDetachedSession(consumer->uid_, key)) {
|
|
PERFETTO_ELOG("Another session has been detached with the same key \"%s\"",
|
|
key.c_str());
|
|
return false;
|
|
}
|
|
|
|
PERFETTO_DCHECK(tracing_session->consumer_maybe_null == consumer);
|
|
tracing_session->consumer_maybe_null = nullptr;
|
|
tracing_session->detach_key = key;
|
|
consumer->tracing_session_id_ = 0;
|
|
return true;
|
|
}
|
|
|
|
bool TracingServiceImpl::AttachConsumer(ConsumerEndpointImpl* consumer,
|
|
const std::string& key) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Consumer %p attaching to session %s",
|
|
reinterpret_cast<void*>(consumer), key.c_str());
|
|
PERFETTO_DCHECK(consumers_.count(consumer));
|
|
|
|
if (consumer->tracing_session_id_) {
|
|
PERFETTO_ELOG(
|
|
"Cannot reattach consumer to session %s"
|
|
" while it already attached tracing session ID %" PRIu64,
|
|
key.c_str(), consumer->tracing_session_id_);
|
|
return false;
|
|
}
|
|
|
|
auto* tracing_session = GetDetachedSession(consumer->uid_, key);
|
|
if (!tracing_session) {
|
|
PERFETTO_ELOG(
|
|
"Failed to attach consumer, session '%s' not found for uid %d",
|
|
key.c_str(), static_cast<int>(consumer->uid_));
|
|
return false;
|
|
}
|
|
|
|
consumer->tracing_session_id_ = tracing_session->id;
|
|
tracing_session->consumer_maybe_null = consumer;
|
|
tracing_session->detach_key.clear();
|
|
return true;
|
|
}
|
|
|
|
bool TracingServiceImpl::EnableTracing(ConsumerEndpointImpl* consumer,
|
|
const TraceConfig& cfg,
|
|
base::ScopedFile fd) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Enabling tracing for consumer %p",
|
|
reinterpret_cast<void*>(consumer));
|
|
if (cfg.lockdown_mode() == TraceConfig::LOCKDOWN_SET)
|
|
lockdown_mode_ = true;
|
|
if (cfg.lockdown_mode() == TraceConfig::LOCKDOWN_CLEAR)
|
|
lockdown_mode_ = false;
|
|
TracingSession* tracing_session =
|
|
GetTracingSession(consumer->tracing_session_id_);
|
|
if (tracing_session) {
|
|
PERFETTO_DLOG(
|
|
"A Consumer is trying to EnableTracing() but another tracing session "
|
|
"is already active (forgot a call to FreeBuffers() ?)");
|
|
return false;
|
|
}
|
|
|
|
const uint32_t max_duration_ms = cfg.enable_extra_guardrails()
|
|
? kGuardrailsMaxTracingDurationMillis
|
|
: kMaxTracingDurationMillis;
|
|
if (cfg.duration_ms() > max_duration_ms) {
|
|
PERFETTO_ELOG("Requested too long trace (%" PRIu32 "ms > %" PRIu32 " ms)",
|
|
cfg.duration_ms(), max_duration_ms);
|
|
return false;
|
|
}
|
|
|
|
const bool has_trigger_config = cfg.trigger_config().trigger_mode() !=
|
|
TraceConfig::TriggerConfig::UNSPECIFIED;
|
|
if (has_trigger_config && (cfg.trigger_config().trigger_timeout_ms() == 0 ||
|
|
cfg.trigger_config().trigger_timeout_ms() >
|
|
kGuardrailsMaxTracingDurationMillis)) {
|
|
PERFETTO_ELOG(
|
|
"Traces with START_TRACING triggers must provide a positive "
|
|
"trigger_timeout_ms < 7 days (received %" PRIu32 "ms)",
|
|
cfg.trigger_config().trigger_timeout_ms());
|
|
return false;
|
|
}
|
|
|
|
if (has_trigger_config && cfg.duration_ms() != 0) {
|
|
PERFETTO_ELOG(
|
|
"duration_ms was set, this must not be set for traces with triggers.");
|
|
return false;
|
|
}
|
|
|
|
if (cfg.trigger_config().trigger_mode() ==
|
|
TraceConfig::TriggerConfig::STOP_TRACING &&
|
|
cfg.write_into_file()) {
|
|
// We don't support this use case because there are subtle assumptions which
// break around TracingServiceEvents and windowed sorting (i.e. if we don't
// drain the events in ReadBuffers because we are waiting for STOP_TRACING,
// we can end up queueing up a lot of TracingServiceEvents and emitting them
// wildly out of order, breaking windowed sorting in trace processor).
|
|
PERFETTO_ELOG(
|
|
"Specifying trigger mode STOP_TRACING and write_into_file together is "
|
|
"unsupported");
|
|
return false;
|
|
}
|
|
|
|
std::unordered_set<std::string> triggers;
|
|
for (const auto& trigger : cfg.trigger_config().triggers()) {
|
|
if (!triggers.insert(trigger.name()).second) {
|
|
PERFETTO_ELOG("Duplicate trigger name: %s", trigger.name().c_str());
|
|
return false;
|
|
}
|
|
}
|
|
|
|
if (cfg.enable_extra_guardrails()) {
|
|
if (cfg.deferred_start()) {
|
|
PERFETTO_ELOG(
|
|
"deferred_start=true is not supported in unsupervised traces");
|
|
return false;
|
|
}
|
|
uint64_t buf_size_sum = 0;
|
|
for (const auto& buf : cfg.buffers()) {
|
|
if (buf.size_kb() % 4 != 0) {
|
|
PERFETTO_ELOG("buffers.size_kb must be a multiple of 4, got %" PRIu32,
|
|
buf.size_kb());
|
|
return false;
|
|
}
|
|
buf_size_sum += buf.size_kb();
|
|
}
|
|
if (buf_size_sum > kGuardrailsMaxTracingBufferSizeKb) {
|
|
PERFETTO_ELOG("Requested too large trace buffer (%" PRIu64
|
|
"kB > %" PRIu32 " kB)",
|
|
buf_size_sum, kGuardrailsMaxTracingBufferSizeKb);
|
|
return false;
|
|
}
|
|
}
|
|
|
|
if (cfg.buffers_size() > kMaxBuffersPerConsumer) {
|
|
PERFETTO_ELOG("Too many buffers configured (%d)", cfg.buffers_size());
|
|
return false;
|
|
}
|
|
|
|
if (!cfg.unique_session_name().empty()) {
|
|
const std::string& name = cfg.unique_session_name();
|
|
for (auto& kv : tracing_sessions_) {
|
|
if (kv.second.config.unique_session_name() == name) {
|
|
PERFETTO_ELOG(
|
|
"A trace with this unique session name (%s) already exists",
|
|
name.c_str());
|
|
return false;
|
|
}
|
|
}
|
|
}
|
|
|
|
if (cfg.enable_extra_guardrails()) {
|
|
// unique_session_name can be empty
|
|
const std::string& name = cfg.unique_session_name();
|
|
int64_t now_s = base::GetBootTimeS().count();
|
|
|
|
// Remove any entries where the time limit has passed so this map doesn't
|
|
// grow indefinitely:
|
|
std::map<std::string, int64_t>& sessions = session_to_last_trace_s_;
|
|
for (auto it = sessions.cbegin(); it != sessions.cend();) {
|
|
if (now_s - it->second > kMinSecondsBetweenTracesGuardrail) {
|
|
it = sessions.erase(it);
|
|
} else {
|
|
++it;
|
|
}
|
|
}
|
|
|
|
int64_t& previous_s = session_to_last_trace_s_[name];
|
|
if (previous_s == 0) {
|
|
previous_s = now_s;
|
|
} else {
|
|
PERFETTO_ELOG(
|
|
"A trace with unique session name \"%s\" began less than %" PRId64
|
|
"s ago (%" PRId64 "s)",
|
|
name.c_str(), kMinSecondsBetweenTracesGuardrail, now_s - previous_s);
|
|
return false;
|
|
}
|
|
}
|
|
|
|
const long sessions_for_uid = std::count_if(
|
|
tracing_sessions_.begin(), tracing_sessions_.end(),
|
|
[consumer](const decltype(tracing_sessions_)::value_type& s) {
|
|
return s.second.consumer_uid == consumer->uid_;
|
|
});
|
|
|
|
int per_uid_limit = kMaxConcurrentTracingSessionsPerUid;
|
|
if (consumer->uid_ == 1066 /* AID_STATSD*/) {
|
|
per_uid_limit = kMaxConcurrentTracingSessionsForStatsdUid;
|
|
}
|
|
if (sessions_for_uid >= per_uid_limit) {
|
|
PERFETTO_ELOG(
|
|
"Too many concurrent tracing sesions (%ld) for uid %d limit is %d",
|
|
sessions_for_uid, static_cast<int>(consumer->uid_), per_uid_limit);
|
|
return false;
|
|
}
|
|
|
|
// TODO(primiano): This is a workaround to prevent a producer from getting
// stuck in a state where it stalls by design by having more TraceWriterImpl
// instances than free pages in the buffer. This is really a bug in
// trace_probes and the way it handles stalls in the shmem buffer.
|
|
if (tracing_sessions_.size() >= kMaxConcurrentTracingSessions) {
|
|
PERFETTO_ELOG("Too many concurrent tracing sesions (%zu)",
|
|
tracing_sessions_.size());
|
|
return false;
|
|
}
|
|
|
|
const TracingSessionID tsid = ++last_tracing_session_id_;
|
|
tracing_session =
|
|
&tracing_sessions_.emplace(tsid, TracingSession(tsid, consumer, cfg))
|
|
.first->second;
|
|
|
|
if (cfg.write_into_file()) {
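// Note that (!fd ^ !cfg.output_path().empty()) below is true iff the FD and
// output_path are either both provided or both missing, i.e. exactly the
// invalid combinations.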
|
|
if (!fd ^ !cfg.output_path().empty()) {
|
|
PERFETTO_ELOG(
|
|
"When write_into_file==true either a FD needs to be passed or "
|
|
"output_path must be populated (but not both)");
|
|
tracing_sessions_.erase(tsid);
|
|
return false;
|
|
}
|
|
if (!cfg.output_path().empty()) {
|
|
fd = CreateTraceFile(cfg.output_path());
|
|
if (!fd) {
|
|
tracing_sessions_.erase(tsid);
|
|
return false;
|
|
}
|
|
}
|
|
tracing_session->write_into_file = std::move(fd);
|
|
uint32_t write_period_ms = cfg.file_write_period_ms();
|
|
if (write_period_ms == 0)
|
|
write_period_ms = kDefaultWriteIntoFilePeriodMs;
|
|
if (write_period_ms < min_write_period_ms_)
|
|
write_period_ms = min_write_period_ms_;
|
|
tracing_session->write_period_ms = write_period_ms;
|
|
tracing_session->max_file_size_bytes = cfg.max_file_size_bytes();
|
|
tracing_session->bytes_written_into_file = 0;
|
|
}
|
|
|
|
// Initialize the log buffers.
|
|
bool did_allocate_all_buffers = true;
|
|
|
|
// Allocate the trace buffers. Also create a map to translate a consumer
|
|
// relative index (TraceConfig.DataSourceConfig.target_buffer) into the
|
|
// corresponding BufferID, which is a global ID namespace for the service and
|
|
// all producers.
|
|
size_t total_buf_size_kb = 0;
|
|
const size_t num_buffers = static_cast<size_t>(cfg.buffers_size());
|
|
tracing_session->buffers_index.reserve(num_buffers);
|
|
for (size_t i = 0; i < num_buffers; i++) {
|
|
const TraceConfig::BufferConfig& buffer_cfg = cfg.buffers()[i];
|
|
BufferID global_id = buffer_ids_.Allocate();
|
|
if (!global_id) {
|
|
did_allocate_all_buffers = false; // We ran out of IDs.
|
|
break;
|
|
}
|
|
tracing_session->buffers_index.push_back(global_id);
|
|
const size_t buf_size_bytes = buffer_cfg.size_kb() * 1024u;
|
|
total_buf_size_kb += buffer_cfg.size_kb();
|
|
TraceBuffer::OverwritePolicy policy =
|
|
buffer_cfg.fill_policy() == TraceConfig::BufferConfig::DISCARD
|
|
? TraceBuffer::kDiscard
|
|
: TraceBuffer::kOverwrite;
|
|
auto it_and_inserted = buffers_.emplace(
|
|
global_id, TraceBuffer::Create(buf_size_bytes, policy));
|
|
PERFETTO_DCHECK(it_and_inserted.second); // buffers_.count(global_id) == 0.
|
|
std::unique_ptr<TraceBuffer>& trace_buffer = it_and_inserted.first->second;
|
|
if (!trace_buffer) {
|
|
did_allocate_all_buffers = false;
|
|
break;
|
|
}
|
|
}
|
|
|
|
UpdateMemoryGuardrail();
|
|
|
|
// This can happen if either:
// - All the kMaxTraceBufferID slots are taken.
// - OOM or, more realistically, we exhausted virtual memory.
// In any case, free all the previously allocated buffers and abort.
// TODO(fmayer): add a test to cover this case; it is quite subtle.
|
|
if (!did_allocate_all_buffers) {
|
|
for (BufferID global_id : tracing_session->buffers_index) {
|
|
buffer_ids_.Free(global_id);
|
|
buffers_.erase(global_id);
|
|
}
|
|
tracing_sessions_.erase(tsid);
|
|
return false;
|
|
}
|
|
|
|
consumer->tracing_session_id_ = tsid;
|
|
|
|
// Setup the data sources on the producers without starting them.
|
|
for (const TraceConfig::DataSource& cfg_data_source : cfg.data_sources()) {
|
|
// Scan all the registered data sources with a matching name.
|
|
auto range = data_sources_.equal_range(cfg_data_source.config().name());
|
|
for (auto it = range.first; it != range.second; it++) {
|
|
TraceConfig::ProducerConfig producer_config;
|
|
for (auto& config : cfg.producers()) {
|
|
if (GetProducer(it->second.producer_id)->name_ ==
|
|
config.producer_name()) {
|
|
producer_config = config;
|
|
break;
|
|
}
|
|
}
|
|
SetupDataSource(cfg_data_source, producer_config, it->second,
|
|
tracing_session);
|
|
}
|
|
}
|
|
|
|
bool has_start_trigger = false;
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
switch (cfg.trigger_config().trigger_mode()) {
|
|
case TraceConfig::TriggerConfig::UNSPECIFIED:
|
|
// no triggers are specified so this isn't a trace that is using triggers.
|
|
PERFETTO_DCHECK(!has_trigger_config);
|
|
break;
|
|
case TraceConfig::TriggerConfig::START_TRACING:
|
|
// For traces which use START_TRACING triggers we need to ensure that the
// tracing session will be cleaned up when it times out.
|
|
has_start_trigger = true;
|
|
task_runner_->PostDelayedTask(
|
|
[weak_this, tsid]() {
|
|
if (weak_this)
|
|
weak_this->OnStartTriggersTimeout(tsid);
|
|
},
|
|
cfg.trigger_config().trigger_timeout_ms());
|
|
break;
|
|
case TraceConfig::TriggerConfig::STOP_TRACING:
|
|
// Update the tracing_session's duration_ms to ensure that, if no trigger
// is received, the session will end and be cleaned up once the trigger
// timeout expires.
//
// TODO(nuskos): Refactor this so that rather than modifying the config we
// have a field we look at on the tracing_session.
|
|
tracing_session->config.set_duration_ms(
|
|
cfg.trigger_config().trigger_timeout_ms());
|
|
break;
|
|
}
|
|
|
|
tracing_session->state = TracingSession::CONFIGURED;
|
|
PERFETTO_LOG(
|
|
"Configured tracing session %" PRIu64
|
|
", #sources:%zu, duration:%d ms, #buffers:%d, total "
|
|
"buffer size:%zu KB, total sessions:%zu, uid:%d session name: \"%s\"",
|
|
tsid, cfg.data_sources().size(), tracing_session->config.duration_ms(),
|
|
cfg.buffers_size(), total_buf_size_kb, tracing_sessions_.size(),
|
|
static_cast<unsigned int>(consumer->uid_),
|
|
cfg.unique_session_name().c_str());
|
|
|
|
// Start the data sources, unless this is a case of early setup + fast
// triggering, either through TraceConfig.deferred_start or
// TraceConfig.trigger_config(). If both are specified, whichever one occurs
// first will initiate the trace.
|
|
if (!cfg.deferred_start() && !has_start_trigger)
|
|
return StartTracing(tsid);
|
|
|
|
return true;
|
|
}
|
|
|
|
void TracingServiceImpl::ChangeTraceConfig(ConsumerEndpointImpl* consumer,
|
|
const TraceConfig& updated_cfg) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
TracingSession* tracing_session =
|
|
GetTracingSession(consumer->tracing_session_id_);
|
|
PERFETTO_DCHECK(tracing_session);
|
|
|
|
if ((tracing_session->state != TracingSession::STARTED) &&
|
|
(tracing_session->state != TracingSession::CONFIGURED)) {
|
|
PERFETTO_ELOG(
|
|
"ChangeTraceConfig() was called for a tracing session which isn't "
|
|
"running.");
|
|
return;
|
|
}
|
|
|
|
// We only support updating producer_name_{,regex}_filter (and pass-through
|
|
// configs) for now; null out any changeable fields and make sure the rest are
|
|
// identical.
|
|
TraceConfig new_config_copy(updated_cfg);
|
|
for (auto& ds_cfg : *new_config_copy.mutable_data_sources()) {
|
|
ds_cfg.clear_producer_name_filter();
|
|
ds_cfg.clear_producer_name_regex_filter();
|
|
}
|
|
|
|
TraceConfig current_config_copy(tracing_session->config);
|
|
for (auto& ds_cfg : *current_config_copy.mutable_data_sources()) {
|
|
ds_cfg.clear_producer_name_filter();
|
|
ds_cfg.clear_producer_name_regex_filter();
|
|
}
|
|
|
|
if (new_config_copy != current_config_copy) {
|
|
PERFETTO_LOG(
|
|
"ChangeTraceConfig() was called with a config containing unsupported "
|
|
"changes; only adding to the producer_name_{,regex}_filter is "
|
|
"currently supported and will have an effect.");
|
|
}
|
|
|
|
for (TraceConfig::DataSource& cfg_data_source :
|
|
*tracing_session->config.mutable_data_sources()) {
|
|
// Find the updated producer_filter in the new config.
|
|
std::vector<std::string> new_producer_name_filter;
|
|
std::vector<std::string> new_producer_name_regex_filter;
|
|
bool found_data_source = false;
|
|
for (auto it : updated_cfg.data_sources()) {
|
|
if (cfg_data_source.config().name() == it.config().name()) {
|
|
new_producer_name_filter = it.producer_name_filter();
|
|
new_producer_name_regex_filter = it.producer_name_regex_filter();
|
|
found_data_source = true;
|
|
break;
|
|
}
|
|
}
|
|
|
|
// Bail out if the data source is not present in the new config.
|
|
if (!found_data_source) {
|
|
PERFETTO_ELOG(
|
|
"ChangeTraceConfig() called without a current data source also "
|
|
"present in the new config: %s",
|
|
cfg_data_source.config().name().c_str());
|
|
continue;
|
|
}
|
|
|
|
// TODO(oysteine): Just replacing the filter means that if
|
|
// there are any filter entries which were present in the original config,
|
|
// but removed from the config passed to ChangeTraceConfig, any matching
|
|
// producers will keep producing but newly added producers after this
|
|
// point will never start.
|
|
*cfg_data_source.mutable_producer_name_filter() = new_producer_name_filter;
|
|
*cfg_data_source.mutable_producer_name_regex_filter() =
|
|
new_producer_name_regex_filter;
|
|
|
|
// Scan all the registered data sources with a matching name.
|
|
auto range = data_sources_.equal_range(cfg_data_source.config().name());
|
|
for (auto it = range.first; it != range.second; it++) {
|
|
ProducerEndpointImpl* producer = GetProducer(it->second.producer_id);
|
|
PERFETTO_DCHECK(producer);
|
|
|
|
// Check if the producer name of this data source is present
|
|
// in the name filters. We currently only support new filters, not
|
|
// removing old ones.
|
|
if (!NameMatchesFilter(producer->name_, new_producer_name_filter,
|
|
new_producer_name_regex_filter)) {
|
|
continue;
|
|
}
|
|
|
|
bool already_setup = false;
|
|
auto& ds_instances = tracing_session->data_source_instances;
|
|
for (auto instance_it = ds_instances.begin();
|
|
instance_it != ds_instances.end(); ++instance_it) {
|
|
if (instance_it->first == it->second.producer_id &&
|
|
instance_it->second.data_source_name ==
|
|
cfg_data_source.config().name()) {
|
|
already_setup = true;
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (already_setup)
|
|
continue;
|
|
|
|
// If it wasn't previously set up, set it up now.
|
|
// (The per-producer config is optional).
|
|
TraceConfig::ProducerConfig producer_config;
|
|
for (auto& config : tracing_session->config.producers()) {
|
|
if (producer->name_ == config.producer_name()) {
|
|
producer_config = config;
|
|
break;
|
|
}
|
|
}
|
|
|
|
DataSourceInstance* ds_inst = SetupDataSource(
|
|
cfg_data_source, producer_config, it->second, tracing_session);
|
|
|
|
if (ds_inst && tracing_session->state == TracingSession::STARTED)
|
|
StartDataSourceInstance(producer, tracing_session, ds_inst);
|
|
}
|
|
}
|
|
}
|
|
|
|
bool TracingServiceImpl::StartTracing(TracingSessionID tsid) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
TracingSession* tracing_session = GetTracingSession(tsid);
|
|
if (!tracing_session) {
|
|
PERFETTO_DLOG("StartTracing() failed, invalid session ID %" PRIu64, tsid);
|
|
return false;
|
|
}
|
|
|
|
if (tracing_session->state != TracingSession::CONFIGURED) {
|
|
PERFETTO_DLOG("StartTracing() failed, invalid session state: %d",
|
|
tracing_session->state);
|
|
return false;
|
|
}
|
|
|
|
tracing_session->state = TracingSession::STARTED;
|
|
|
|
// We store the start-of-trace snapshot separately, as it's important to make
// sure we can interpret all the data in the trace; storing it in the ring
// buffer means it could be overwritten by a later snapshot.
|
|
if (!tracing_session->config.builtin_data_sources()
|
|
.disable_clock_snapshotting()) {
|
|
SnapshotClocks(&tracing_session->initial_clock_snapshot);
|
|
}
|
|
|
|
// We don't snapshot the clocks here because we just did this above.
|
|
SnapshotLifecyleEvent(
|
|
tracing_session,
|
|
protos::pbzero::TracingServiceEvent::kTracingStartedFieldNumber,
|
|
false /* snapshot_clocks */);
|
|
|
|
// Periodically snapshot clocks, stats, sync markers while the trace is
|
|
// active. The snapshots are emitted on the future ReadBuffers() calls, which
|
|
// means that:
|
|
// (a) If we're streaming to a file (or to a consumer) while tracing, we
|
|
// write snapshots periodically into the trace.
|
|
// (b) If ReadBuffers() is only called after tracing ends, we emit the latest
|
|
// snapshot into the trace. For clock snapshots, we keep track of the
|
|
// snapshot recorded at the beginning of the session
|
|
// (initial_clock_snapshot above), as well as the most recent sampled
|
|
// snapshots that showed significant new drift between different clocks.
|
|
// The latter clock snapshots are sampled periodically and at lifecycle
|
|
// events.
|
|
PeriodicSnapshotTask(tracing_session);
|
|
|
|
// Trigger delayed task if the trace is time limited.
|
|
const uint32_t trace_duration_ms = tracing_session->config.duration_ms();
|
|
if (trace_duration_ms > 0) {
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostDelayedTask(
|
|
[weak_this, tsid] {
|
|
// Skip the flush entirely if the trace session doesn't exist anymore.
// This is to prevent misleading error messages from being logged.
|
|
if (!weak_this)
|
|
return;
|
|
auto* tracing_session_ptr = weak_this->GetTracingSession(tsid);
|
|
if (!tracing_session_ptr)
|
|
return;
|
|
// If this trace was using STOP_TRACING triggers and we've seen
|
|
// one, then the trigger overrides the normal timeout. In this
|
|
// case we just return and let the other task clean up this trace.
|
|
if (tracing_session_ptr->config.trigger_config().trigger_mode() ==
|
|
TraceConfig::TriggerConfig::STOP_TRACING &&
|
|
!tracing_session_ptr->received_triggers.empty())
|
|
return;
|
|
// In all other cases (START_TRACING or no triggers) we flush
|
|
// after |trace_duration_ms| unconditionally.
|
|
weak_this->FlushAndDisableTracing(tsid);
|
|
},
|
|
trace_duration_ms);
|
|
}
|
|
|
|
// Start the periodic drain tasks if we need to save the trace into a file.
|
|
if (tracing_session->config.write_into_file()) {
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostDelayedTask(
|
|
[weak_this, tsid] {
|
|
if (weak_this)
|
|
weak_this->ReadBuffers(tsid, nullptr);
|
|
},
|
|
tracing_session->delay_to_next_write_period_ms());
|
|
}
|
|
|
|
// Start the periodic flush tasks if the config specified a flush period.
|
|
if (tracing_session->config.flush_period_ms())
|
|
PeriodicFlushTask(tsid, /*post_next_only=*/true);
|
|
|
|
// Start the periodic incremental state clear tasks if the config specified a
|
|
// period.
|
|
if (tracing_session->config.incremental_state_config().clear_period_ms()) {
|
|
PeriodicClearIncrementalStateTask(tsid, /*post_next_only=*/true);
|
|
}
|
|
|
|
for (auto& kv : tracing_session->data_source_instances) {
|
|
ProducerID producer_id = kv.first;
|
|
DataSourceInstance& data_source = kv.second;
|
|
ProducerEndpointImpl* producer = GetProducer(producer_id);
|
|
if (!producer) {
|
|
PERFETTO_DFATAL("Producer does not exist.");
|
|
continue;
|
|
}
|
|
StartDataSourceInstance(producer, tracing_session, &data_source);
|
|
}
|
|
return true;
|
|
}
|
|
|
|
void TracingServiceImpl::StartDataSourceInstance(
|
|
ProducerEndpointImpl* producer,
|
|
TracingSession* tracing_session,
|
|
TracingServiceImpl::DataSourceInstance* instance) {
|
|
PERFETTO_DCHECK(instance->state == DataSourceInstance::CONFIGURED);
|
|
if (instance->will_notify_on_start) {
|
|
instance->state = DataSourceInstance::STARTING;
|
|
} else {
|
|
instance->state = DataSourceInstance::STARTED;
|
|
}
|
|
if (tracing_session->consumer_maybe_null) {
|
|
tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
|
|
*producer, *instance);
|
|
}
|
|
producer->StartDataSource(instance->instance_id, instance->config);
|
|
|
|
// If all data sources are started, notify the consumer.
|
|
if (instance->state == DataSourceInstance::STARTED)
|
|
MaybeNotifyAllDataSourcesStarted(tracing_session);
|
|
}
|
|
|
|
// DisableTracing just stops the data sources but doesn't free up any buffer.
|
|
// This is to allow the consumer to freeze the buffers (by stopping the trace)
|
|
// and then drain the buffers. The actual teardown of the TracingSession happens
|
|
// in FreeBuffers().
|
|
void TracingServiceImpl::DisableTracing(TracingSessionID tsid,
|
|
bool disable_immediately) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
TracingSession* tracing_session = GetTracingSession(tsid);
|
|
if (!tracing_session) {
|
|
// Can happen if the consumer calls this before EnableTracing() or after
|
|
// FreeBuffers().
|
|
PERFETTO_DLOG("DisableTracing() failed, invalid session ID %" PRIu64, tsid);
|
|
return;
|
|
}
|
|
|
|
switch (tracing_session->state) {
|
|
// Spurious call to DisableTracing() while already disabled, nothing to do.
|
|
case TracingSession::DISABLED:
|
|
PERFETTO_DCHECK(tracing_session->AllDataSourceInstancesStopped());
|
|
return;
|
|
|
|
// This is either:
|
|
// A) The case of a graceful DisableTracing() call followed by a call to
|
|
// FreeBuffers(), iff |disable_immediately| == true. In this case we want
|
|
// to forcefully transition in the disabled state without waiting for the
|
|
// outstanding acks because the buffers are going to be destroyed soon.
|
|
// B) A spurious call, iff |disable_immediately| == false, in which case
|
|
// there is nothing to do.
|
|
case TracingSession::DISABLING_WAITING_STOP_ACKS:
|
|
PERFETTO_DCHECK(!tracing_session->AllDataSourceInstancesStopped());
|
|
if (disable_immediately)
|
|
DisableTracingNotifyConsumerAndFlushFile(tracing_session);
|
|
return;
|
|
|
|
// Continues below.
|
|
case TracingSession::CONFIGURED:
|
|
// If the session didn't even start there is no need to orchestrate a
|
|
// graceful stop of data sources.
|
|
disable_immediately = true;
|
|
break;
|
|
|
|
// This is the nominal case, continues below.
|
|
case TracingSession::STARTED:
|
|
break;
|
|
}
|
|
|
|
for (auto& data_source_inst : tracing_session->data_source_instances) {
|
|
const ProducerID producer_id = data_source_inst.first;
|
|
DataSourceInstance& instance = data_source_inst.second;
|
|
ProducerEndpointImpl* producer = GetProducer(producer_id);
|
|
PERFETTO_DCHECK(producer);
|
|
PERFETTO_DCHECK(instance.state == DataSourceInstance::CONFIGURED ||
|
|
instance.state == DataSourceInstance::STARTING ||
|
|
instance.state == DataSourceInstance::STARTED);
|
|
StopDataSourceInstance(producer, tracing_session, &instance,
|
|
disable_immediately);
|
|
}
|
|
|
|
// Either this request is flagged with |disable_immediately| or there are no
|
|
// data sources that are requesting a final handshake. In both cases just mark
|
|
// the session as disabled immediately, notify the consumer and flush the
|
|
// trace file (if used).
|
|
if (tracing_session->AllDataSourceInstancesStopped())
|
|
return DisableTracingNotifyConsumerAndFlushFile(tracing_session);
|
|
|
|
tracing_session->state = TracingSession::DISABLING_WAITING_STOP_ACKS;
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostDelayedTask(
|
|
[weak_this, tsid] {
|
|
if (weak_this)
|
|
weak_this->OnDisableTracingTimeout(tsid);
|
|
},
|
|
tracing_session->data_source_stop_timeout_ms());
|
|
|
|
// Deliberately NOT removing the session from |tracing_sessions_|; it's still
// needed to call ReadBuffers(). FreeBuffers() will erase() the session.
|
|
}
|
|
|
|
void TracingServiceImpl::NotifyDataSourceStarted(
|
|
ProducerID producer_id,
|
|
DataSourceInstanceID instance_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
for (auto& kv : tracing_sessions_) {
|
|
TracingSession& tracing_session = kv.second;
|
|
DataSourceInstance* instance =
|
|
tracing_session.GetDataSourceInstance(producer_id, instance_id);
|
|
|
|
if (!instance)
|
|
continue;
|
|
|
|
// If the tracing session was already stopped, ignore this notification.
|
|
if (tracing_session.state != TracingSession::STARTED)
|
|
continue;
|
|
|
|
if (instance->state != DataSourceInstance::STARTING) {
|
|
PERFETTO_ELOG("Started data source instance in incorrect state: %d",
|
|
instance->state);
|
|
continue;
|
|
}
|
|
|
|
instance->state = DataSourceInstance::STARTED;
|
|
|
|
ProducerEndpointImpl* producer = GetProducer(producer_id);
|
|
PERFETTO_DCHECK(producer);
|
|
if (tracing_session.consumer_maybe_null) {
|
|
tracing_session.consumer_maybe_null->OnDataSourceInstanceStateChange(
|
|
*producer, *instance);
|
|
}
|
|
|
|
// If all data sources are started, notify the consumer.
|
|
MaybeNotifyAllDataSourcesStarted(&tracing_session);
|
|
} // for (tracing_session)
|
|
}
|
|
|
|
void TracingServiceImpl::MaybeNotifyAllDataSourcesStarted(
|
|
TracingSession* tracing_session) {
|
|
if (!tracing_session->consumer_maybe_null)
|
|
return;
|
|
|
|
if (!tracing_session->AllDataSourceInstancesStarted())
|
|
return;
|
|
|
|
// In some rare cases, we can get in this state more than once. Consider the
|
|
// following scenario: 3 data sources are registered -> trace starts ->
|
|
// all 3 data sources ack -> OnAllDataSourcesStarted() is called.
|
|
// Imagine now that a 4th data source registers while the trace is ongoing.
|
|
// This would hit the AllDataSourceInstancesStarted() condition again.
|
|
// In this case, however, we don't want to re-notify the consumer again.
|
|
// That would be unexpected (even if, perhaps, technically correct) and
|
|
// trigger bugs in the consumer.
|
|
if (tracing_session->did_notify_all_data_source_started)
|
|
return;
|
|
|
|
PERFETTO_DLOG("All data sources started");
|
|
|
|
SnapshotLifecyleEvent(
|
|
tracing_session,
|
|
protos::pbzero::TracingServiceEvent::kAllDataSourcesStartedFieldNumber,
|
|
true /* snapshot_clocks */);
|
|
|
|
tracing_session->did_notify_all_data_source_started = true;
|
|
tracing_session->consumer_maybe_null->OnAllDataSourcesStarted();
|
|
}
|
|
|
|
void TracingServiceImpl::NotifyDataSourceStopped(
|
|
ProducerID producer_id,
|
|
DataSourceInstanceID instance_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
for (auto& kv : tracing_sessions_) {
|
|
TracingSession& tracing_session = kv.second;
|
|
DataSourceInstance* instance =
|
|
tracing_session.GetDataSourceInstance(producer_id, instance_id);
|
|
|
|
if (!instance)
|
|
continue;
|
|
|
|
if (instance->state != DataSourceInstance::STOPPING) {
|
|
PERFETTO_ELOG("Stopped data source instance in incorrect state: %d",
|
|
instance->state);
|
|
continue;
|
|
}
|
|
|
|
instance->state = DataSourceInstance::STOPPED;
|
|
|
|
ProducerEndpointImpl* producer = GetProducer(producer_id);
|
|
PERFETTO_DCHECK(producer);
|
|
if (tracing_session.consumer_maybe_null) {
|
|
tracing_session.consumer_maybe_null->OnDataSourceInstanceStateChange(
|
|
*producer, *instance);
|
|
}
|
|
|
|
if (!tracing_session.AllDataSourceInstancesStopped())
|
|
continue;
|
|
|
|
if (tracing_session.state != TracingSession::DISABLING_WAITING_STOP_ACKS)
|
|
continue;
|
|
|
|
// All data sources acked the termination.
|
|
DisableTracingNotifyConsumerAndFlushFile(&tracing_session);
|
|
} // for (tracing_session)
|
|
}
|
|
|
|
void TracingServiceImpl::ActivateTriggers(
|
|
ProducerID producer_id,
|
|
const std::vector<std::string>& triggers) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto* producer = GetProducer(producer_id);
|
|
PERFETTO_DCHECK(producer);
|
|
for (const auto& trigger_name : triggers) {
|
|
for (auto& id_and_tracing_session : tracing_sessions_) {
|
|
auto& tracing_session = id_and_tracing_session.second;
|
|
TracingSessionID tsid = id_and_tracing_session.first;
|
|
auto iter = std::find_if(
|
|
tracing_session.config.trigger_config().triggers().begin(),
|
|
tracing_session.config.trigger_config().triggers().end(),
|
|
[&trigger_name](const TraceConfig::TriggerConfig::Trigger& trigger) {
|
|
return trigger.name() == trigger_name;
|
|
});
|
|
if (iter == tracing_session.config.trigger_config().triggers().end()) {
|
|
continue;
|
|
}
|
|
|
|
// If this trigger requires the sending producer's name to match a regex
// (non-empty producer_name_regex()), ensure the producer who sent this
// trigger matches.
|
|
if (!iter->producer_name_regex().empty() &&
|
|
!std::regex_match(
|
|
producer->name_,
|
|
std::regex(iter->producer_name_regex(), std::regex::extended))) {
|
|
continue;
|
|
}
|
|
|
|
const bool triggers_already_received =
|
|
!tracing_session.received_triggers.empty();
|
|
tracing_session.received_triggers.push_back(
|
|
{static_cast<uint64_t>(base::GetBootTimeNs().count()), iter->name(),
|
|
producer->name_, producer->uid_});
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
switch (tracing_session.config.trigger_config().trigger_mode()) {
|
|
case TraceConfig::TriggerConfig::START_TRACING:
|
|
// If the session has already been triggered and moved past
|
|
// CONFIGURED then we don't need to repeat StartTracing. This would
|
|
// work fine (StartTracing would return false) but would add error
|
|
// logs.
|
|
if (tracing_session.state != TracingSession::CONFIGURED)
|
|
break;
|
|
|
|
PERFETTO_DLOG("Triggering '%s' on tracing session %" PRIu64
|
|
" with duration of %" PRIu32 "ms.",
|
|
iter->name().c_str(), tsid, iter->stop_delay_ms());
|
|
// We override the trace duration to be the trigger's requested
// value; this ensures that the trace will end after this amount
// of time has passed.
|
|
tracing_session.config.set_duration_ms(iter->stop_delay_ms());
|
|
StartTracing(tsid);
|
|
break;
|
|
case TraceConfig::TriggerConfig::STOP_TRACING:
|
|
// Only stop the trace once, to avoid confusing log messages. I.e. once
// we've hit the first trigger we've already posted the task to
// FlushAndDisableTracing(), so all subsequent triggers just break out.
|
|
if (triggers_already_received)
|
|
break;
|
|
|
|
PERFETTO_DLOG("Triggering '%s' on tracing session %" PRIu64
|
|
" with duration of %" PRIu32 "ms.",
|
|
iter->name().c_str(), tsid, iter->stop_delay_ms());
|
|
// Now that we've seen a trigger we need to stop, flush, and disable
|
|
// this session after the configured |stop_delay_ms|.
|
|
task_runner_->PostDelayedTask(
|
|
[weak_this, tsid] {
|
|
// Skip the flush entirely if the trace session doesn't exist
// anymore. This is to prevent misleading error messages from being
// logged.
|
|
if (weak_this && weak_this->GetTracingSession(tsid))
|
|
weak_this->FlushAndDisableTracing(tsid);
|
|
},
|
|
// If this trigger's stop_delay_ms is zero, the task becomes immediately
// executable and will run shortly.
|
|
iter->stop_delay_ms());
|
|
break;
|
|
case TraceConfig::TriggerConfig::UNSPECIFIED:
|
|
PERFETTO_ELOG("Trigger activated but trigger mode unspecified.");
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// Always invoked kDataSourceStopTimeoutMs after DisableTracing(). In nominal
|
|
// conditions all data sources should have acked the stop and this will early
|
|
// out.
|
|
void TracingServiceImpl::OnDisableTracingTimeout(TracingSessionID tsid) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
TracingSession* tracing_session = GetTracingSession(tsid);
|
|
if (!tracing_session ||
|
|
tracing_session->state != TracingSession::DISABLING_WAITING_STOP_ACKS) {
|
|
return; // Tracing session was successfully disabled.
|
|
}
|
|
|
|
PERFETTO_ILOG("Timeout while waiting for ACKs for tracing session %" PRIu64,
|
|
tsid);
|
|
PERFETTO_DCHECK(!tracing_session->AllDataSourceInstancesStopped());
|
|
DisableTracingNotifyConsumerAndFlushFile(tracing_session);
|
|
}
|
|
|
|
void TracingServiceImpl::DisableTracingNotifyConsumerAndFlushFile(
|
|
TracingSession* tracing_session) {
|
|
PERFETTO_DCHECK(tracing_session->state != TracingSession::DISABLED);
|
|
for (auto& inst_kv : tracing_session->data_source_instances) {
|
|
if (inst_kv.second.state == DataSourceInstance::STOPPED)
|
|
continue;
|
|
inst_kv.second.state = DataSourceInstance::STOPPED;
|
|
ProducerEndpointImpl* producer = GetProducer(inst_kv.first);
|
|
PERFETTO_DCHECK(producer);
|
|
if (tracing_session->consumer_maybe_null) {
|
|
tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
|
|
*producer, inst_kv.second);
|
|
}
|
|
}
|
|
tracing_session->state = TracingSession::DISABLED;
|
|
|
|
// Scrape any remaining chunks that weren't flushed by the producers.
|
|
for (auto& producer_id_and_producer : producers_)
|
|
ScrapeSharedMemoryBuffers(tracing_session, producer_id_and_producer.second);
|
|
|
|
SnapshotLifecyleEvent(
|
|
tracing_session,
|
|
protos::pbzero::TracingServiceEvent::kTracingDisabledFieldNumber,
|
|
true /* snapshot_clocks */);
|
|
|
|
if (tracing_session->write_into_file) {
|
|
tracing_session->write_period_ms = 0;
|
|
ReadBuffers(tracing_session->id, nullptr);
|
|
}
|
|
|
|
if (tracing_session->consumer_maybe_null)
|
|
tracing_session->consumer_maybe_null->NotifyOnTracingDisabled();
|
|
}
|
|
|
|
void TracingServiceImpl::Flush(TracingSessionID tsid,
|
|
uint32_t timeout_ms,
|
|
ConsumerEndpoint::FlushCallback callback) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
TracingSession* tracing_session = GetTracingSession(tsid);
|
|
if (!tracing_session) {
|
|
PERFETTO_DLOG("Flush() failed, invalid session ID %" PRIu64, tsid);
|
|
return;
|
|
}
|
|
|
|
if (!timeout_ms)
|
|
timeout_ms = tracing_session->flush_timeout_ms();
|
|
|
|
if (tracing_session->pending_flushes.size() > 1000) {
|
|
PERFETTO_ELOG("Too many flushes (%zu) pending for the tracing session",
|
|
tracing_session->pending_flushes.size());
|
|
callback(false);
|
|
return;
|
|
}
|
|
|
|
FlushRequestID flush_request_id = ++last_flush_request_id_;
|
|
PendingFlush& pending_flush =
|
|
tracing_session->pending_flushes
|
|
.emplace_hint(tracing_session->pending_flushes.end(),
|
|
flush_request_id, PendingFlush(std::move(callback)))
|
|
->second;
|
|
|
|
// Send a flush request to each producer involved in the tracing session. In
|
|
// order to issue a flush request we have to build a map of all data source
|
|
// instance ids enabled for each producer.
|
|
std::map<ProducerID, std::vector<DataSourceInstanceID>> flush_map;
|
|
for (const auto& data_source_inst : tracing_session->data_source_instances) {
|
|
const ProducerID producer_id = data_source_inst.first;
|
|
const DataSourceInstanceID ds_inst_id = data_source_inst.second.instance_id;
|
|
flush_map[producer_id].push_back(ds_inst_id);
|
|
}
|
|
|
|
for (const auto& kv : flush_map) {
|
|
ProducerID producer_id = kv.first;
|
|
ProducerEndpointImpl* producer = GetProducer(producer_id);
|
|
const std::vector<DataSourceInstanceID>& data_sources = kv.second;
|
|
producer->Flush(flush_request_id, data_sources);
|
|
pending_flush.producers.insert(producer_id);
|
|
}
|
|
|
|
// If there are no producers to flush (realistically this happens only in
|
|
// some tests) fire OnFlushTimeout() straight away, without waiting.
|
|
if (flush_map.empty())
|
|
timeout_ms = 0;
|
|
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostDelayedTask(
|
|
[weak_this, tsid, flush_request_id] {
|
|
if (weak_this)
|
|
weak_this->OnFlushTimeout(tsid, flush_request_id);
|
|
},
|
|
timeout_ms);
|
|
}
|
|
|
|
void TracingServiceImpl::NotifyFlushDoneForProducer(
|
|
ProducerID producer_id,
|
|
FlushRequestID flush_request_id) {
|
|
for (auto& kv : tracing_sessions_) {
|
|
// Remove all pending flushes <= |flush_request_id| for |producer_id|.
|
|
auto& pending_flushes = kv.second.pending_flushes;
|
|
auto end_it = pending_flushes.upper_bound(flush_request_id);
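// upper_bound() points at the first flush with id > |flush_request_id|, so the
// loop below visits exactly the pending flushes with id <= |flush_request_id|.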
|
|
for (auto it = pending_flushes.begin(); it != end_it;) {
|
|
PendingFlush& pending_flush = it->second;
|
|
pending_flush.producers.erase(producer_id);
|
|
if (pending_flush.producers.empty()) {
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
TracingSessionID tsid = kv.first;
|
|
auto callback = std::move(pending_flush.callback);
|
|
task_runner_->PostTask([weak_this, tsid, callback]() {
|
|
if (weak_this) {
|
|
weak_this->CompleteFlush(tsid, std::move(callback),
|
|
/*success=*/true);
|
|
}
|
|
});
|
|
it = pending_flushes.erase(it);
|
|
} else {
|
|
it++;
|
|
}
|
|
} // for (pending_flushes)
|
|
} // for (tracing_session)
|
|
}
|
|
|
|
void TracingServiceImpl::OnFlushTimeout(TracingSessionID tsid,
|
|
FlushRequestID flush_request_id) {
|
|
TracingSession* tracing_session = GetTracingSession(tsid);
|
|
if (!tracing_session)
|
|
return;
|
|
auto it = tracing_session->pending_flushes.find(flush_request_id);
|
|
if (it == tracing_session->pending_flushes.end())
|
|
return; // Nominal case: flush was completed and acked on time.
|
|
|
|
// If there were no producers to flush, consider it a success.
|
|
bool success = it->second.producers.empty();
|
|
|
|
auto callback = std::move(it->second.callback);
|
|
tracing_session->pending_flushes.erase(it);
|
|
CompleteFlush(tsid, std::move(callback), success);
|
|
}
|
|
|
|
void TracingServiceImpl::CompleteFlush(TracingSessionID tsid,
|
|
ConsumerEndpoint::FlushCallback callback,
|
|
bool success) {
|
|
TracingSession* tracing_session = GetTracingSession(tsid);
|
|
if (tracing_session) {
|
|
// Producers may not have been able to flush all their data, even if they
|
|
// indicated flush completion. If possible, also collect uncommitted chunks
|
|
// to make sure we have everything they wrote so far.
|
|
for (auto& producer_id_and_producer : producers_) {
|
|
ScrapeSharedMemoryBuffers(tracing_session,
|
|
producer_id_and_producer.second);
|
|
}
|
|
}
|
|
SnapshotLifecyleEvent(
|
|
tracing_session,
|
|
protos::pbzero::TracingServiceEvent::kAllDataSourcesFlushedFieldNumber,
|
|
true /* snapshot_clocks */);
|
|
callback(success);
|
|
}
|
|
|
|
void TracingServiceImpl::ScrapeSharedMemoryBuffers(
|
|
TracingSession* tracing_session,
|
|
ProducerEndpointImpl* producer) {
|
|
if (!producer->smb_scraping_enabled_)
|
|
return;
|
|
|
|
// Can't copy chunks if we don't know about any trace writers.
|
|
if (producer->writers_.empty())
|
|
return;
|
|
|
|
// Performance optimization: On flush or session disconnect, this method is
|
|
// called for each producer. If the producer doesn't participate in the
|
|
  // session, there's no need to scrape its chunks right now. We can tell if a
|
|
// producer participates in the session by checking if the producer is allowed
|
|
// to write into the session's log buffers.
|
|
const auto& session_buffers = tracing_session->buffers_index;
|
|
bool producer_in_session =
|
|
std::any_of(session_buffers.begin(), session_buffers.end(),
|
|
[producer](BufferID buffer_id) {
|
|
return producer->allowed_target_buffers_.count(buffer_id);
|
|
});
|
|
if (!producer_in_session)
|
|
return;
|
|
|
|
PERFETTO_DLOG("Scraping SMB for producer %" PRIu16, producer->id_);
|
|
|
|
// Find and copy any uncommitted chunks from the SMB.
|
|
//
|
|
// In nominal conditions, the page layout of the used SMB pages should never
|
|
// change because the service is the only one who is supposed to modify used
|
|
// pages (to make them free again).
|
|
//
|
|
// However, the code here needs to deal with the case of a malicious producer
|
|
// altering the SMB in unpredictable ways. Thankfully the SMB size is
|
|
// immutable, so a chunk will always point to some valid memory, even if the
|
|
// producer alters the intended layout and chunk header concurrently.
|
|
// Ultimately a malicious producer altering the SMB's chunk layout while we
|
|
// are iterating in this function is not any different from the case of a
|
|
// malicious producer asking to commit a chunk made of random data, which is
|
|
// something this class has to deal with regardless.
|
|
//
|
|
// The only legitimate mutations that can happen from sane producers,
|
|
// concurrently to this function, are:
|
|
// A. free pages being partitioned,
|
|
// B. free chunks being migrated to kChunkBeingWritten,
|
|
// C. kChunkBeingWritten chunks being migrated to kChunkCompleted.
|
|
|
|
SharedMemoryABI* abi = &producer->shmem_abi_;
|
|
// num_pages() is immutable after the SMB is initialized and cannot be changed
|
|
  // by the producer, even a malicious one.
|
|
for (size_t page_idx = 0; page_idx < abi->num_pages(); page_idx++) {
|
|
uint32_t layout = abi->GetPageLayout(page_idx);
|
|
|
|
uint32_t used_chunks = abi->GetUsedChunks(layout); // Returns a bitmap.
|
|
// Skip empty pages.
|
|
if (used_chunks == 0)
|
|
continue;
|
|
|
|
// Scrape the chunks that are currently used. These should be either in
|
|
// state kChunkBeingWritten or kChunkComplete.
|
|
for (uint32_t chunk_idx = 0; used_chunks; chunk_idx++, used_chunks >>= 1) {
|
|
if (!(used_chunks & 1))
|
|
continue;
|
|
|
|
SharedMemoryABI::ChunkState state =
|
|
SharedMemoryABI::GetChunkStateFromLayout(layout, chunk_idx);
|
|
PERFETTO_DCHECK(state == SharedMemoryABI::kChunkBeingWritten ||
|
|
state == SharedMemoryABI::kChunkComplete);
|
|
bool chunk_complete = state == SharedMemoryABI::kChunkComplete;
|
|
|
|
SharedMemoryABI::Chunk chunk =
|
|
abi->GetChunkUnchecked(page_idx, layout, chunk_idx);
|
|
|
|
uint16_t packet_count;
|
|
uint8_t flags;
|
|
// GetPacketCountAndFlags has acquire_load semantics.
|
|
std::tie(packet_count, flags) = chunk.GetPacketCountAndFlags();
|
|
|
|
// It only makes sense to copy an incomplete chunk if there's at least
|
|
// one full packet available. (The producer may not have completed the
|
|
// last packet in it yet, so we need at least 2.)
|
|
if (!chunk_complete && packet_count < 2)
|
|
continue;
|
|
|
|
// At this point, it is safe to access the remaining header fields of
|
|
// the chunk. Even if the chunk was only just transferred from
|
|
// kChunkFree into kChunkBeingWritten state, the header should be
|
|
// written completely once the packet count increased above 1 (it was
|
|
// reset to 0 by the service when the chunk was freed).
|
|
|
|
WriterID writer_id = chunk.writer_id();
|
|
base::Optional<BufferID> target_buffer_id =
|
|
producer->buffer_id_for_writer(writer_id);
|
|
|
|
// We can only scrape this chunk if we know which log buffer to copy it
|
|
// into.
|
|
if (!target_buffer_id)
|
|
continue;
|
|
|
|
// Skip chunks that don't belong to the requested tracing session.
|
|
bool target_buffer_belongs_to_session =
|
|
std::find(session_buffers.begin(), session_buffers.end(),
|
|
*target_buffer_id) != session_buffers.end();
|
|
if (!target_buffer_belongs_to_session)
|
|
continue;
|
|
|
|
uint32_t chunk_id =
|
|
chunk.header()->chunk_id.load(std::memory_order_relaxed);
|
|
|
|
CopyProducerPageIntoLogBuffer(
|
|
producer->id_, producer->uid_, writer_id, chunk_id, *target_buffer_id,
|
|
packet_count, flags, chunk_complete, chunk.payload_begin(),
|
|
chunk.payload_size());
|
|
}
|
|
}
|
|
}
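
// Illustrative sketch (editor's addition, not part of the Perfetto sources):
// the bitmap walk used in the scraping loop above. Given a bitmap where bit i
// set means "chunk i is in use", visit each used chunk index by shifting the
// bitmap right one bit per iteration and testing the low bit. Names are
// hypothetical; only <cstdint> and a callable are assumed.
namespace chunk_bitmap_sketch {

template <typename Visitor>
inline void ForEachUsedChunk(uint32_t used_chunks, Visitor visit) {
  for (uint32_t chunk_idx = 0; used_chunks; chunk_idx++, used_chunks >>= 1) {
    if (!(used_chunks & 1))
      continue;  // This chunk is free; skip it.
    visit(chunk_idx);
  }
}

}  // namespace chunk_bitmap_sketch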
|
|
|
|
void TracingServiceImpl::FlushAndDisableTracing(TracingSessionID tsid) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Triggering final flush for %" PRIu64, tsid);
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
Flush(tsid, 0, [weak_this, tsid](bool success) {
|
|
PERFETTO_DLOG("Flush done (success: %d), disabling trace session %" PRIu64,
|
|
success, tsid);
|
|
if (!weak_this)
|
|
return;
|
|
    TracingSession* session = weak_this->GetTracingSession(tsid);
    // The session may have been freed while the flush was in flight (e.g. by
    // a consumer disconnect); bail out to avoid dereferencing a null pointer.
    if (!session)
      return;
    if (session->consumer_maybe_null) {
|
|
// If the consumer is still attached, just disable the session but give it
|
|
// a chance to read the contents.
|
|
weak_this->DisableTracing(tsid);
|
|
} else {
|
|
// If the consumer detached, destroy the session. If the consumer did
|
|
// start the session in long-tracing mode, the service will have saved
|
|
// the contents to the passed file. If not, the contents will be
|
|
// destroyed.
|
|
weak_this->FreeBuffers(tsid);
|
|
}
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::PeriodicFlushTask(TracingSessionID tsid,
|
|
bool post_next_only) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
TracingSession* tracing_session = GetTracingSession(tsid);
|
|
if (!tracing_session || tracing_session->state != TracingSession::STARTED)
|
|
return;
|
|
|
|
uint32_t flush_period_ms = tracing_session->config.flush_period_ms();
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostDelayedTask(
|
|
[weak_this, tsid] {
|
|
if (weak_this)
|
|
weak_this->PeriodicFlushTask(tsid, /*post_next_only=*/false);
|
|
},
|
|
flush_period_ms - (base::GetWallTimeMs().count() % flush_period_ms));
|
|
|
|
if (post_next_only)
|
|
return;
|
|
|
|
PERFETTO_DLOG("Triggering periodic flush for trace session %" PRIu64, tsid);
|
|
Flush(tsid, 0, [](bool success) {
|
|
if (!success)
|
|
PERFETTO_ELOG("Periodic flush timed out");
|
|
});
|
|
}
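
// Illustrative sketch (editor's addition, not part of the Perfetto sources):
// the phase-aligned delay computation used by the periodic tasks above. By
// posting the next task after |period - (now % period)| ms, the task fires on
// multiples of the period instead of drifting by its own runtime. The helper
// name is hypothetical; only <cstdint> is assumed.
namespace periodic_delay_sketch {

inline uint32_t NextDelayMs(uint64_t now_ms, uint32_t period_ms) {
  // E.g. with a 10'000 ms period and now == 12'345 ms, the next run is
  // scheduled 7'655 ms from now, i.e. exactly at the 20'000 ms boundary.
  return static_cast<uint32_t>(period_ms - (now_ms % period_ms));
}

}  // namespace periodic_delay_sketch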
|
|
|
|
void TracingServiceImpl::PeriodicClearIncrementalStateTask(
|
|
TracingSessionID tsid,
|
|
bool post_next_only) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
TracingSession* tracing_session = GetTracingSession(tsid);
|
|
if (!tracing_session || tracing_session->state != TracingSession::STARTED)
|
|
return;
|
|
|
|
uint32_t clear_period_ms =
|
|
tracing_session->config.incremental_state_config().clear_period_ms();
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostDelayedTask(
|
|
[weak_this, tsid] {
|
|
if (weak_this)
|
|
weak_this->PeriodicClearIncrementalStateTask(
|
|
tsid, /*post_next_only=*/false);
|
|
},
|
|
clear_period_ms - (base::GetWallTimeMs().count() % clear_period_ms));
|
|
|
|
if (post_next_only)
|
|
return;
|
|
|
|
PERFETTO_DLOG(
|
|
"Performing periodic incremental state clear for trace session %" PRIu64,
|
|
tsid);
|
|
|
|
// Queue the IPCs to producers with active data sources that opted in.
|
|
std::map<ProducerID, std::vector<DataSourceInstanceID>> clear_map;
|
|
for (const auto& kv : tracing_session->data_source_instances) {
|
|
ProducerID producer_id = kv.first;
|
|
const DataSourceInstance& data_source = kv.second;
|
|
if (data_source.handles_incremental_state_clear)
|
|
clear_map[producer_id].push_back(data_source.instance_id);
|
|
}
|
|
|
|
for (const auto& kv : clear_map) {
|
|
ProducerID producer_id = kv.first;
|
|
const std::vector<DataSourceInstanceID>& data_sources = kv.second;
|
|
ProducerEndpointImpl* producer = GetProducer(producer_id);
|
|
if (!producer) {
|
|
PERFETTO_DFATAL("Producer does not exist.");
|
|
continue;
|
|
}
|
|
producer->ClearIncrementalState(data_sources);
|
|
}
|
|
}
|
|
|
|
// Note: when this is called to write into a file passed when starting tracing
|
|
// |consumer| will be == nullptr (as opposed to the case of a consumer asking
|
|
// to send the trace data back over IPC).
|
|
bool TracingServiceImpl::ReadBuffers(TracingSessionID tsid,
|
|
ConsumerEndpointImpl* consumer) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
TracingSession* tracing_session = GetTracingSession(tsid);
|
|
if (!tracing_session) {
|
|
// This will be hit systematically from the PostDelayedTask when directly
|
|
// writing into the file (in which case consumer == nullptr). Suppress the
|
|
// log in this case as it's just spam.
|
|
if (consumer) {
|
|
PERFETTO_DLOG("Cannot ReadBuffers(): no tracing session is active");
|
|
}
|
|
return false;
|
|
}
|
|
|
|
// When a tracing session is waiting for a trigger it is considered empty. If
|
|
// a tracing session finishes and moves into DISABLED without ever receiving a
|
|
  // trigger, the trace should never return any data. This includes the synthetic
|
|
// packets like TraceConfig and Clock snapshots. So we bail out early and let
|
|
// the consumer know there is no data.
|
|
if (!tracing_session->config.trigger_config().triggers().empty() &&
|
|
tracing_session->received_triggers.empty()) {
|
|
PERFETTO_DLOG(
|
|
"ReadBuffers(): tracing session has not received a trigger yet.");
|
|
return false;
|
|
}
|
|
|
|
// This can happen if the file is closed by a previous task because it reaches
|
|
// |max_file_size_bytes|.
|
|
if (!tracing_session->write_into_file && !consumer)
|
|
return false;
|
|
|
|
if (tracing_session->write_into_file && consumer) {
|
|
// If the consumer enabled tracing and asked to save the contents into the
|
|
    // passed file, it makes little sense to also try to read the buffers over IPC,
|
|
// as that would just steal data from the periodic draining task.
|
|
PERFETTO_DFATAL("Consumer trying to read from write_into_file session.");
|
|
return false;
|
|
}
|
|
|
|
std::vector<TracePacket> packets;
|
|
packets.reserve(1024); // Just an educated guess to avoid trivial expansions.
|
|
|
|
if (!tracing_session->initial_clock_snapshot.empty()) {
|
|
EmitClockSnapshot(tracing_session,
|
|
std::move(tracing_session->initial_clock_snapshot),
|
|
&packets);
|
|
}
|
|
|
|
for (auto& snapshot : tracing_session->clock_snapshot_ring_buffer) {
|
|
PERFETTO_DCHECK(!snapshot.empty());
|
|
EmitClockSnapshot(tracing_session, std::move(snapshot), &packets);
|
|
}
|
|
tracing_session->clock_snapshot_ring_buffer.clear();
|
|
|
|
if (tracing_session->should_emit_sync_marker) {
|
|
EmitSyncMarker(&packets);
|
|
tracing_session->should_emit_sync_marker = false;
|
|
}
|
|
|
|
if (!tracing_session->config.builtin_data_sources().disable_trace_config()) {
|
|
MaybeEmitTraceConfig(tracing_session, &packets);
|
|
MaybeEmitReceivedTriggers(tracing_session, &packets);
|
|
}
|
|
if (!tracing_session->config.builtin_data_sources().disable_system_info())
|
|
MaybeEmitSystemInfo(tracing_session, &packets);
|
|
|
|
// Note that in the proto comment, we guarantee that the tracing_started
|
|
// lifecycle event will be emitted before any data packets so make sure to
|
|
// keep this before reading the tracing buffers.
|
|
if (!tracing_session->config.builtin_data_sources().disable_service_events())
|
|
EmitLifecycleEvents(tracing_session, &packets);
|
|
|
|
size_t packets_bytes = 0; // SUM(slice.size() for each slice in |packets|).
|
|
size_t total_slices = 0; // SUM(#slices in |packets|).
|
|
|
|
// Add up size for packets added by the Maybe* calls above.
|
|
for (const TracePacket& packet : packets) {
|
|
packets_bytes += packet.size();
|
|
total_slices += packet.slices().size();
|
|
}
|
|
|
|
// This is a rough threshold to determine how much to read from the buffer in
|
|
// each task. This is to avoid executing a single huge sending task for too
|
|
  // long and risk hitting the watchdog. This is *not* an upper bound: we just
|
|
// stop accumulating new packets and PostTask *after* we cross this threshold.
|
|
// This constant essentially balances the PostTask and IPC overhead vs the
|
|
// responsiveness of the service. An extremely small value will cause one IPC
|
|
// and one PostTask for each slice but will keep the service extremely
|
|
// responsive. An extremely large value will batch the send for the full
|
|
// buffer in one large task, will hit the blocking send() once the socket
|
|
// buffers are full and hang the service for a bit (until the consumer
|
|
// catches up).
|
|
static constexpr size_t kApproxBytesPerTask = 32768;
|
|
bool did_hit_threshold = false;
|
|
|
|
// TODO(primiano): Extend the ReadBuffers API to allow reading only some
|
|
// buffers, not all of them in one go.
|
|
for (size_t buf_idx = 0;
|
|
buf_idx < tracing_session->num_buffers() && !did_hit_threshold;
|
|
buf_idx++) {
|
|
auto tbuf_iter = buffers_.find(tracing_session->buffers_index[buf_idx]);
|
|
if (tbuf_iter == buffers_.end()) {
|
|
PERFETTO_DFATAL("Buffer not found.");
|
|
continue;
|
|
}
|
|
TraceBuffer& tbuf = *tbuf_iter->second;
|
|
tbuf.BeginRead();
|
|
while (!did_hit_threshold) {
|
|
TracePacket packet;
|
|
TraceBuffer::PacketSequenceProperties sequence_properties{};
|
|
bool previous_packet_dropped;
|
|
if (!tbuf.ReadNextTracePacket(&packet, &sequence_properties,
|
|
&previous_packet_dropped)) {
|
|
break;
|
|
}
|
|
PERFETTO_DCHECK(sequence_properties.producer_id_trusted != 0);
|
|
PERFETTO_DCHECK(sequence_properties.writer_id != 0);
|
|
PERFETTO_DCHECK(sequence_properties.producer_uid_trusted != kInvalidUid);
|
|
PERFETTO_DCHECK(packet.size() > 0);
|
|
if (!PacketStreamValidator::Validate(packet.slices())) {
|
|
tracing_session->invalid_packets++;
|
|
PERFETTO_DLOG("Dropping invalid packet");
|
|
continue;
|
|
}
|
|
|
|
// Append a slice with the trusted field data. This can't be spoofed
|
|
// because above we validated that the existing slices don't contain any
|
|
// trusted fields. For added safety we append instead of prepending
|
|
// because according to protobuf semantics, if the same field is
|
|
// encountered multiple times the last instance takes priority. Note that
|
|
// truncated packets are also rejected, so the producer can't give us a
|
|
// partial packet (e.g., a truncated string) which only becomes valid when
|
|
// the trusted data is appended here.
|
|
Slice slice = Slice::Allocate(32);
|
|
protozero::StaticBuffered<protos::pbzero::TracePacket> trusted_packet(
|
|
slice.own_data(), slice.size);
|
|
trusted_packet->set_trusted_uid(
|
|
static_cast<int32_t>(sequence_properties.producer_uid_trusted));
|
|
trusted_packet->set_trusted_packet_sequence_id(
|
|
tracing_session->GetPacketSequenceID(
|
|
sequence_properties.producer_id_trusted,
|
|
sequence_properties.writer_id));
|
|
if (previous_packet_dropped)
|
|
trusted_packet->set_previous_packet_dropped(previous_packet_dropped);
|
|
slice.size = trusted_packet.Finalize();
|
|
packet.AddSlice(std::move(slice));
|
|
|
|
// Append the packet (inclusive of the trusted uid) to |packets|.
|
|
packets_bytes += packet.size();
|
|
total_slices += packet.slices().size();
|
|
did_hit_threshold = packets_bytes >= kApproxBytesPerTask &&
|
|
!tracing_session->write_into_file;
|
|
packets.emplace_back(std::move(packet));
|
|
} // for(packets...)
|
|
} // for(buffers...)
|
|
|
|
const bool has_more = did_hit_threshold;
|
|
|
|
size_t prev_packets_size = packets.size();
|
|
if (!tracing_session->config.builtin_data_sources()
|
|
.disable_service_events()) {
|
|
// We don't bother snapshotting clocks here because we wouldn't be able to
|
|
// emit it and we shouldn't have significant drift from the last snapshot in
|
|
// any case.
|
|
SnapshotLifecyleEvent(tracing_session,
|
|
protos::pbzero::TracingServiceEvent::
|
|
kReadTracingBuffersCompletedFieldNumber,
|
|
false /* snapshot_clocks */);
|
|
EmitLifecycleEvents(tracing_session, &packets);
|
|
}
|
|
|
|
  // Only emit the stats when there is no more trace data available to read.
|
|
// That way, any problems that occur while reading from the buffers are
|
|
// reflected in the emitted stats. This is particularly important for use
|
|
// cases where ReadBuffers is only ever called after the tracing session is
|
|
// stopped.
|
|
if (!has_more && tracing_session->should_emit_stats) {
|
|
EmitStats(tracing_session, &packets);
|
|
tracing_session->should_emit_stats = false;
|
|
}
|
|
|
|
// Add sizes of packets emitted by the EmitLifecycleEvents + EmitStats.
|
|
for (size_t i = prev_packets_size; i < packets.size(); ++i) {
|
|
packets_bytes += packets[i].size();
|
|
total_slices += packets[i].slices().size();
|
|
}
|
|
|
|
// If the caller asked us to write into a file by setting
|
|
// |write_into_file| == true in the trace config, drain the packets read
|
|
// (if any) into the given file descriptor.
|
|
if (tracing_session->write_into_file) {
|
|
const uint64_t max_size = tracing_session->max_file_size_bytes
|
|
? tracing_session->max_file_size_bytes
|
|
: std::numeric_limits<size_t>::max();
|
|
|
|
// When writing into a file, the file should look like a root trace.proto
|
|
// message. Each packet should be prepended with a proto preamble stating
|
|
// its field id (within trace.proto) and size. Hence the addition below.
|
|
const size_t max_iovecs = total_slices + packets.size();
|
|
|
|
size_t num_iovecs = 0;
|
|
bool stop_writing_into_file = tracing_session->write_period_ms == 0;
|
|
std::unique_ptr<struct iovec[]> iovecs(new struct iovec[max_iovecs]);
|
|
size_t num_iovecs_at_last_packet = 0;
|
|
uint64_t bytes_about_to_be_written = 0;
|
|
for (TracePacket& packet : packets) {
|
|
std::tie(iovecs[num_iovecs].iov_base, iovecs[num_iovecs].iov_len) =
|
|
packet.GetProtoPreamble();
|
|
bytes_about_to_be_written += iovecs[num_iovecs].iov_len;
|
|
num_iovecs++;
|
|
for (const Slice& slice : packet.slices()) {
|
|
// writev() doesn't change the passed pointer. However, struct iovec
|
|
        // takes a non-const ptr because it's the same struct used by readv().
|
|
// Hence the const_cast here.
|
|
char* start = static_cast<char*>(const_cast<void*>(slice.start));
|
|
bytes_about_to_be_written += slice.size;
|
|
iovecs[num_iovecs++] = {start, slice.size};
|
|
}
|
|
|
|
if (tracing_session->bytes_written_into_file +
|
|
bytes_about_to_be_written >=
|
|
max_size) {
|
|
stop_writing_into_file = true;
|
|
num_iovecs = num_iovecs_at_last_packet;
|
|
break;
|
|
}
|
|
|
|
num_iovecs_at_last_packet = num_iovecs;
|
|
}
|
|
PERFETTO_DCHECK(num_iovecs <= max_iovecs);
|
|
int fd = *tracing_session->write_into_file;
|
|
|
|
uint64_t total_wr_size = 0;
|
|
|
|
// writev() can take at most IOV_MAX entries per call. Batch them.
|
|
constexpr size_t kIOVMax = IOV_MAX;
|
|
for (size_t i = 0; i < num_iovecs; i += kIOVMax) {
|
|
int iov_batch_size = static_cast<int>(std::min(num_iovecs - i, kIOVMax));
|
|
ssize_t wr_size = PERFETTO_EINTR(writev(fd, &iovecs[i], iov_batch_size));
|
|
if (wr_size <= 0) {
|
|
PERFETTO_PLOG("writev() failed");
|
|
stop_writing_into_file = true;
|
|
break;
|
|
}
|
|
total_wr_size += static_cast<size_t>(wr_size);
|
|
}
|
|
|
|
tracing_session->bytes_written_into_file += total_wr_size;
|
|
|
|
PERFETTO_DLOG("Draining into file, written: %" PRIu64 " KB, stop: %d",
|
|
(total_wr_size + 1023) / 1024, stop_writing_into_file);
|
|
if (stop_writing_into_file) {
|
|
// Ensure all data was written to the file before we close it.
|
|
base::FlushFile(fd);
|
|
tracing_session->write_into_file.reset();
|
|
tracing_session->write_period_ms = 0;
|
|
if (tracing_session->state == TracingSession::STARTED)
|
|
DisableTracing(tsid);
|
|
return true;
|
|
}
|
|
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostDelayedTask(
|
|
[weak_this, tsid] {
|
|
if (weak_this)
|
|
weak_this->ReadBuffers(tsid, nullptr);
|
|
},
|
|
tracing_session->delay_to_next_write_period_ms());
|
|
return true;
|
|
} // if (tracing_session->write_into_file)
|
|
|
|
if (has_more) {
|
|
auto weak_consumer = consumer->GetWeakPtr();
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask([weak_this, weak_consumer, tsid] {
|
|
if (!weak_this || !weak_consumer)
|
|
return;
|
|
weak_this->ReadBuffers(tsid, weak_consumer.get());
|
|
});
|
|
}
|
|
|
|
// Keep this as tail call, just in case the consumer re-enters.
|
|
consumer->consumer_->OnTraceData(std::move(packets), has_more);
|
|
return true;
|
|
}
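
// Illustrative sketch (editor's addition, not part of the Perfetto sources):
// the writev() batching used above when draining packets into a file.
// writev() accepts at most IOV_MAX entries per call, so the iovec array is
// written in IOV_MAX-sized batches, retrying each call on EINTR. Like the
// code above, the sketch does not handle short writes. Only POSIX headers
// already used by this file (<sys/uio.h>, <limits.h>, <errno.h>) and
// <algorithm> are assumed; the helper name is hypothetical.
namespace writev_batch_sketch {

// Returns true if every batch was written to |fd| without error.
inline bool WriteAllBatched(int fd, struct iovec* iovecs, size_t num_iovecs) {
  constexpr size_t kBatchMax = IOV_MAX;
  for (size_t i = 0; i < num_iovecs; i += kBatchMax) {
    int batch_size = static_cast<int>(std::min(num_iovecs - i, kBatchMax));
    ssize_t wr;
    do {
      wr = writev(fd, &iovecs[i], batch_size);
    } while (wr == -1 && errno == EINTR);
    if (wr <= 0)
      return false;  // The caller decides whether to log and stop, as above.
  }
  return true;
}

}  // namespace writev_batch_sketch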
|
|
|
|
void TracingServiceImpl::FreeBuffers(TracingSessionID tsid) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Freeing buffers for session %" PRIu64, tsid);
|
|
TracingSession* tracing_session = GetTracingSession(tsid);
|
|
if (!tracing_session) {
|
|
PERFETTO_DLOG("FreeBuffers() failed, invalid session ID %" PRIu64, tsid);
|
|
return; // TODO(primiano): signal failure?
|
|
}
|
|
DisableTracing(tsid, /*disable_immediately=*/true);
|
|
|
|
PERFETTO_DCHECK(tracing_session->AllDataSourceInstancesStopped());
|
|
tracing_session->data_source_instances.clear();
|
|
|
|
for (auto& producer_entry : producers_) {
|
|
ProducerEndpointImpl* producer = producer_entry.second;
|
|
producer->OnFreeBuffers(tracing_session->buffers_index);
|
|
}
|
|
|
|
for (BufferID buffer_id : tracing_session->buffers_index) {
|
|
buffer_ids_.Free(buffer_id);
|
|
PERFETTO_DCHECK(buffers_.count(buffer_id) == 1);
|
|
buffers_.erase(buffer_id);
|
|
}
|
|
bool notify_traceur = tracing_session->config.notify_traceur();
|
|
tracing_sessions_.erase(tsid);
|
|
UpdateMemoryGuardrail();
|
|
|
|
PERFETTO_LOG("Tracing session %" PRIu64 " ended, total sessions:%zu", tsid,
|
|
tracing_sessions_.size());
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
static const char kTraceurProp[] = "sys.trace.trace_end_signal";
|
|
if (notify_traceur && __system_property_set(kTraceurProp, "1"))
|
|
PERFETTO_ELOG("Failed to setprop %s=1", kTraceurProp);
|
|
#else
|
|
base::ignore_result(notify_traceur);
|
|
#endif
|
|
}
|
|
|
|
void TracingServiceImpl::RegisterDataSource(ProducerID producer_id,
|
|
const DataSourceDescriptor& desc) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Producer %" PRIu16 " registered data source \"%s\"",
|
|
producer_id, desc.name().c_str());
|
|
|
|
PERFETTO_DCHECK(!desc.name().empty());
|
|
auto reg_ds = data_sources_.emplace(desc.name(),
|
|
RegisteredDataSource{producer_id, desc});
|
|
|
|
// If there are existing tracing sessions, we need to check if the new
|
|
// data source is enabled by any of them.
|
|
if (tracing_sessions_.empty())
|
|
return;
|
|
|
|
ProducerEndpointImpl* producer = GetProducer(producer_id);
|
|
if (!producer) {
|
|
PERFETTO_DFATAL("Producer not found.");
|
|
return;
|
|
}
|
|
|
|
for (auto& iter : tracing_sessions_) {
|
|
TracingSession& tracing_session = iter.second;
|
|
if (tracing_session.state != TracingSession::STARTED &&
|
|
tracing_session.state != TracingSession::CONFIGURED) {
|
|
continue;
|
|
}
|
|
|
|
TraceConfig::ProducerConfig producer_config;
|
|
for (auto& config : tracing_session.config.producers()) {
|
|
if (producer->name_ == config.producer_name()) {
|
|
producer_config = config;
|
|
break;
|
|
}
|
|
}
|
|
for (const TraceConfig::DataSource& cfg_data_source :
|
|
tracing_session.config.data_sources()) {
|
|
if (cfg_data_source.config().name() != desc.name())
|
|
continue;
|
|
DataSourceInstance* ds_inst = SetupDataSource(
|
|
cfg_data_source, producer_config, reg_ds->second, &tracing_session);
|
|
if (ds_inst && tracing_session.state == TracingSession::STARTED)
|
|
StartDataSourceInstance(producer, &tracing_session, ds_inst);
|
|
}
|
|
}
|
|
}
|
|
|
|
void TracingServiceImpl::StopDataSourceInstance(ProducerEndpointImpl* producer,
|
|
TracingSession* tracing_session,
|
|
DataSourceInstance* instance,
|
|
bool disable_immediately) {
|
|
const DataSourceInstanceID ds_inst_id = instance->instance_id;
|
|
if (instance->will_notify_on_stop && !disable_immediately) {
|
|
instance->state = DataSourceInstance::STOPPING;
|
|
} else {
|
|
instance->state = DataSourceInstance::STOPPED;
|
|
}
|
|
if (tracing_session->consumer_maybe_null) {
|
|
tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
|
|
*producer, *instance);
|
|
}
|
|
producer->StopDataSource(ds_inst_id);
|
|
}
|
|
|
|
void TracingServiceImpl::UnregisterDataSource(ProducerID producer_id,
|
|
const std::string& name) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Producer %" PRIu16 " unregistered data source \"%s\"",
|
|
producer_id, name.c_str());
|
|
PERFETTO_CHECK(producer_id);
|
|
ProducerEndpointImpl* producer = GetProducer(producer_id);
|
|
PERFETTO_DCHECK(producer);
|
|
for (auto& kv : tracing_sessions_) {
|
|
auto& ds_instances = kv.second.data_source_instances;
|
|
bool removed = false;
|
|
for (auto it = ds_instances.begin(); it != ds_instances.end();) {
|
|
if (it->first == producer_id && it->second.data_source_name == name) {
|
|
DataSourceInstanceID ds_inst_id = it->second.instance_id;
|
|
if (it->second.state != DataSourceInstance::STOPPED) {
|
|
if (it->second.state != DataSourceInstance::STOPPING)
|
|
StopDataSourceInstance(producer, &kv.second, &it->second,
|
|
/* disable_immediately = */ false);
|
|
// Mark the instance as stopped immediately, since we are
|
|
// unregistering it below.
|
|
if (it->second.state == DataSourceInstance::STOPPING)
|
|
NotifyDataSourceStopped(producer_id, ds_inst_id);
|
|
}
|
|
it = ds_instances.erase(it);
|
|
removed = true;
|
|
} else {
|
|
++it;
|
|
}
|
|
} // for (data_source_instances)
|
|
if (removed)
|
|
MaybeNotifyAllDataSourcesStarted(&kv.second);
|
|
} // for (tracing_session)
|
|
|
|
for (auto it = data_sources_.begin(); it != data_sources_.end(); ++it) {
|
|
if (it->second.producer_id == producer_id &&
|
|
it->second.descriptor.name() == name) {
|
|
data_sources_.erase(it);
|
|
return;
|
|
}
|
|
}
|
|
|
|
PERFETTO_DFATAL(
|
|
"Tried to unregister a non-existent data source \"%s\" for "
|
|
"producer %" PRIu16,
|
|
name.c_str(), producer_id);
|
|
}
|
|
|
|
TracingServiceImpl::DataSourceInstance* TracingServiceImpl::SetupDataSource(
|
|
const TraceConfig::DataSource& cfg_data_source,
|
|
const TraceConfig::ProducerConfig& producer_config,
|
|
const RegisteredDataSource& data_source,
|
|
TracingSession* tracing_session) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
ProducerEndpointImpl* producer = GetProducer(data_source.producer_id);
|
|
PERFETTO_DCHECK(producer);
|
|
// An existing producer that is not ftrace could have registered itself as
|
|
  // ftrace; we must not enable it in that case.
|
|
if (lockdown_mode_ && producer->uid_ != uid_) {
|
|
PERFETTO_DLOG("Lockdown mode: not enabling producer %hu", producer->id_);
|
|
return nullptr;
|
|
}
|
|
// TODO(primiano): Add tests for registration ordering (data sources vs
|
|
// consumers).
|
|
if (!NameMatchesFilter(producer->name_,
|
|
cfg_data_source.producer_name_filter(),
|
|
cfg_data_source.producer_name_regex_filter())) {
|
|
PERFETTO_DLOG("Data source: %s is filtered out for producer: %s",
|
|
cfg_data_source.config().name().c_str(),
|
|
producer->name_.c_str());
|
|
return nullptr;
|
|
}
|
|
|
|
auto relative_buffer_id = cfg_data_source.config().target_buffer();
|
|
if (relative_buffer_id >= tracing_session->num_buffers()) {
|
|
PERFETTO_LOG(
|
|
"The TraceConfig for DataSource %s specified a target_buffer out of "
|
|
"bound (%d). Skipping it.",
|
|
cfg_data_source.config().name().c_str(), relative_buffer_id);
|
|
return nullptr;
|
|
}
|
|
|
|
// Create a copy of the DataSourceConfig specified in the trace config. This
|
|
// will be passed to the producer after translating the |target_buffer| id.
|
|
// The |target_buffer| parameter passed by the consumer in the trace config is
|
|
// relative to the buffers declared in the same trace config. This has to be
|
|
// translated to the global BufferID before passing it to the producers, which
|
|
// don't know anything about tracing sessions and consumers.
|
|
|
|
DataSourceInstanceID inst_id = ++last_data_source_instance_id_;
|
|
auto insert_iter = tracing_session->data_source_instances.emplace(
|
|
std::piecewise_construct, //
|
|
std::forward_as_tuple(producer->id_),
|
|
std::forward_as_tuple(
|
|
inst_id,
|
|
cfg_data_source.config(), // Deliberate copy.
|
|
data_source.descriptor.name(),
|
|
data_source.descriptor.will_notify_on_start(),
|
|
data_source.descriptor.will_notify_on_stop(),
|
|
data_source.descriptor.handles_incremental_state_clear()));
|
|
DataSourceInstance* ds_instance = &insert_iter->second;
|
|
|
|
// New data source instance starts out in CONFIGURED state.
|
|
if (tracing_session->consumer_maybe_null) {
|
|
tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
|
|
*producer, *ds_instance);
|
|
}
|
|
|
|
DataSourceConfig& ds_config = ds_instance->config;
|
|
ds_config.set_trace_duration_ms(tracing_session->config.duration_ms());
|
|
ds_config.set_stop_timeout_ms(tracing_session->data_source_stop_timeout_ms());
|
|
ds_config.set_enable_extra_guardrails(
|
|
tracing_session->config.enable_extra_guardrails());
|
|
ds_config.set_tracing_session_id(tracing_session->id);
|
|
BufferID global_id = tracing_session->buffers_index[relative_buffer_id];
|
|
PERFETTO_DCHECK(global_id);
|
|
ds_config.set_target_buffer(global_id);
|
|
|
|
PERFETTO_DLOG("Setting up data source %s with target buffer %" PRIu16,
|
|
ds_config.name().c_str(), global_id);
|
|
if (!producer->shared_memory()) {
|
|
// Determine the SMB page size. Must be an integer multiple of 4k.
|
|
// As for the SMB size below, the decision tree is as follows:
|
|
// 1. Give priority to what is defined in the trace config.
|
|
// 2. If unset give priority to the hint passed by the producer.
|
|
// 3. Keep within bounds and ensure it's a multiple of 4k.
|
|
size_t page_size = producer_config.page_size_kb() * 1024;
|
|
if (page_size == 0)
|
|
page_size = producer->shmem_page_size_hint_bytes_;
|
|
|
|
// Determine the SMB size. Must be an integer multiple of the SMB page size.
|
|
// The decision tree is as follows:
|
|
    // 1. Give priority to what is defined in the trace config.
|
|
// 2. If unset give priority to the hint passed by the producer.
|
|
// 3. Keep within bounds and ensure it's a multiple of the page size.
|
|
size_t shm_size = producer_config.shm_size_kb() * 1024;
|
|
if (shm_size == 0)
|
|
shm_size = producer->shmem_size_hint_bytes_;
|
|
|
|
auto valid_sizes = EnsureValidShmSizes(shm_size, page_size);
|
|
if (valid_sizes != std::tie(shm_size, page_size)) {
|
|
PERFETTO_DLOG(
|
|
"Invalid configured SMB sizes: shm_size %zu page_size %zu. Falling "
|
|
"back to shm_size %zu page_size %zu.",
|
|
shm_size, page_size, std::get<0>(valid_sizes),
|
|
std::get<1>(valid_sizes));
|
|
}
|
|
std::tie(shm_size, page_size) = valid_sizes;
|
|
|
|
// TODO(primiano): right now Create() will suicide in case of OOM if the
|
|
// mmap fails. We should instead gracefully fail the request and tell the
|
|
// client to go away.
|
|
PERFETTO_DLOG("Creating SMB of %zu KB for producer \"%s\"", shm_size / 1024,
|
|
producer->name_.c_str());
|
|
auto shared_memory = shm_factory_->CreateSharedMemory(shm_size);
|
|
producer->SetupSharedMemory(std::move(shared_memory), page_size,
|
|
/*provided_by_producer=*/false);
|
|
}
|
|
producer->SetupDataSource(inst_id, ds_config);
|
|
return ds_instance;
|
|
}
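
// Illustrative sketch (editor's addition, not part of the Perfetto sources):
// one way to implement the size-sanitizing step described above: round the
// page size to a multiple of 4 KB within a bounded range, then round the SMB
// size down to a multiple of the page size and cap it. The bounds used here
// are made up for illustration and are NOT the values used by Perfetto's
// EnsureValidShmSizes(); all names are hypothetical and <algorithm> plus
// <utility> are assumed.
namespace shm_size_sketch {

inline std::pair<size_t, size_t> ClampShmSizes(size_t shm_size,
                                               size_t page_size) {
  constexpr size_t kPage = 4096;
  constexpr size_t kMaxPageSize = 64 * 1024;        // Assumed bound.
  constexpr size_t kMaxShmSize = 32 * 1024 * 1024;  // Assumed bound.

  if (page_size == 0)
    page_size = kPage;
  // Round down to a multiple of 4 KB, then clamp into [4 KB, kMaxPageSize].
  page_size = std::min(std::max(page_size - (page_size % kPage), kPage),
                       kMaxPageSize);

  if (shm_size < page_size)
    shm_size = page_size;
  // Round down to a multiple of the page size and cap the total SMB size.
  shm_size = std::min(shm_size - (shm_size % page_size), kMaxShmSize);
  return std::make_pair(shm_size, page_size);
}

}  // namespace shm_size_sketch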
|
|
|
|
// Note: all the fields except the *_trusted ones are untrusted, as in, the
// Producer might be lying / returning garbage contents. |src| and |size| can
// be trusted in terms of being a valid pointer, but not the contents.
|
|
void TracingServiceImpl::CopyProducerPageIntoLogBuffer(
|
|
ProducerID producer_id_trusted,
|
|
uid_t producer_uid_trusted,
|
|
WriterID writer_id,
|
|
ChunkID chunk_id,
|
|
BufferID buffer_id,
|
|
uint16_t num_fragments,
|
|
uint8_t chunk_flags,
|
|
bool chunk_complete,
|
|
const uint8_t* src,
|
|
size_t size) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
|
|
ProducerEndpointImpl* producer = GetProducer(producer_id_trusted);
|
|
if (!producer) {
|
|
PERFETTO_DFATAL("Producer not found.");
|
|
chunks_discarded_++;
|
|
return;
|
|
}
|
|
|
|
TraceBuffer* buf = GetBufferByID(buffer_id);
|
|
if (!buf) {
|
|
PERFETTO_DLOG("Could not find target buffer %" PRIu16
|
|
" for producer %" PRIu16,
|
|
buffer_id, producer_id_trusted);
|
|
chunks_discarded_++;
|
|
return;
|
|
}
|
|
|
|
// Verify that the producer is actually allowed to write into the target
|
|
// buffer specified in the request. This prevents a malicious producer from
|
|
// injecting data into a log buffer that belongs to a tracing session the
|
|
// producer is not part of.
|
|
if (!producer->is_allowed_target_buffer(buffer_id)) {
|
|
PERFETTO_ELOG("Producer %" PRIu16
|
|
" tried to write into forbidden target buffer %" PRIu16,
|
|
producer_id_trusted, buffer_id);
|
|
PERFETTO_DFATAL("Forbidden target buffer");
|
|
chunks_discarded_++;
|
|
return;
|
|
}
|
|
|
|
// If the writer was registered by the producer, it should only write into the
|
|
// buffer it was registered with.
|
|
base::Optional<BufferID> associated_buffer =
|
|
producer->buffer_id_for_writer(writer_id);
|
|
if (associated_buffer && *associated_buffer != buffer_id) {
|
|
PERFETTO_ELOG("Writer %" PRIu16 " of producer %" PRIu16
|
|
" was registered to write into target buffer %" PRIu16
|
|
", but tried to write into buffer %" PRIu16,
|
|
writer_id, producer_id_trusted, *associated_buffer,
|
|
buffer_id);
|
|
PERFETTO_DFATAL("Wrong target buffer");
|
|
chunks_discarded_++;
|
|
return;
|
|
}
|
|
|
|
buf->CopyChunkUntrusted(producer_id_trusted, producer_uid_trusted, writer_id,
|
|
chunk_id, num_fragments, chunk_flags, chunk_complete,
|
|
src, size);
|
|
}
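
// Illustrative sketch (editor's addition, not part of the Perfetto sources):
// the trust-boundary checks performed above before copying a chunk: the
// target buffer must be one the producer was granted access to, and a writer
// that was registered against a specific buffer may only write there. Names
// are hypothetical; a null pointer stands in for base::Optional and <set> is
// assumed.
namespace buffer_acl_sketch {

inline bool CanCopyChunk(const std::set<uint16_t>& allowed_buffers,
                         const uint16_t* buffer_bound_to_writer,  // May be null.
                         uint16_t target_buffer) {
  if (!allowed_buffers.count(target_buffer))
    return false;  // Producer was never granted access to this buffer.
  if (buffer_bound_to_writer && *buffer_bound_to_writer != target_buffer)
    return false;  // Registered writer is bound to a different buffer.
  return true;
}

}  // namespace buffer_acl_sketch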
|
|
|
|
void TracingServiceImpl::ApplyChunkPatches(
|
|
ProducerID producer_id_trusted,
|
|
const std::vector<CommitDataRequest::ChunkToPatch>& chunks_to_patch) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
|
|
for (const auto& chunk : chunks_to_patch) {
|
|
const ChunkID chunk_id = static_cast<ChunkID>(chunk.chunk_id());
|
|
const WriterID writer_id = static_cast<WriterID>(chunk.writer_id());
|
|
TraceBuffer* buf =
|
|
GetBufferByID(static_cast<BufferID>(chunk.target_buffer()));
|
|
static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
|
|
"Add a '|| chunk_id > kMaxChunkID' below if this fails");
|
|
if (!writer_id || writer_id > kMaxWriterID || !buf) {
|
|
// This can genuinely happen when the trace is stopped. The producers
|
|
// might see the stop signal with some delay and try to keep sending
|
|
      // the remaining patches for a short while afterwards.
|
|
PERFETTO_DLOG(
|
|
"Received invalid chunks_to_patch request from Producer: %" PRIu16
|
|
", BufferID: %" PRIu32 " ChunkdID: %" PRIu32 " WriterID: %" PRIu16,
|
|
producer_id_trusted, chunk.target_buffer(), chunk_id, writer_id);
|
|
patches_discarded_ += static_cast<uint64_t>(chunk.patches_size());
|
|
continue;
|
|
}
|
|
|
|
// Note, there's no need to validate that the producer is allowed to write
|
|
// to the specified buffer ID (or that it's the correct buffer ID for a
|
|
// registered TraceWriter). That's because TraceBuffer uses the producer ID
|
|
// and writer ID to look up the chunk to patch. If the producer specifies an
|
|
// incorrect buffer, this lookup will fail and TraceBuffer will ignore the
|
|
// patches. Because the producer ID is trusted, there's also no way for a
|
|
// malicious producer to patch another producer's data.
|
|
|
|
    // Speculate on the fact that there are going to be a limited number of
|
|
// patches per request, so we can allocate the |patches| array on the stack.
|
|
std::array<TraceBuffer::Patch, 1024> patches; // Uninitialized.
|
|
if (chunk.patches().size() > patches.size()) {
|
|
PERFETTO_ELOG("Too many patches (%zu) batched in the same request",
|
|
                    chunk.patches().size());
|
|
PERFETTO_DFATAL("Too many patches");
|
|
patches_discarded_ += static_cast<uint64_t>(chunk.patches_size());
|
|
continue;
|
|
}
|
|
|
|
size_t i = 0;
|
|
for (const auto& patch : chunk.patches()) {
|
|
const std::string& patch_data = patch.data();
|
|
if (patch_data.size() != patches[i].data.size()) {
|
|
PERFETTO_ELOG("Received patch from producer: %" PRIu16
|
|
" of unexpected size %zu",
|
|
producer_id_trusted, patch_data.size());
|
|
patches_discarded_++;
|
|
continue;
|
|
}
|
|
patches[i].offset_untrusted = patch.offset();
|
|
memcpy(&patches[i].data[0], patch_data.data(), patches[i].data.size());
|
|
i++;
|
|
}
|
|
buf->TryPatchChunkContents(producer_id_trusted, writer_id, chunk_id,
|
|
&patches[0], i, chunk.has_more_patches());
|
|
}
|
|
}
|
|
|
|
TracingServiceImpl::TracingSession* TracingServiceImpl::GetDetachedSession(
|
|
uid_t uid,
|
|
const std::string& key) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
for (auto& kv : tracing_sessions_) {
|
|
TracingSession* session = &kv.second;
|
|
if (session->consumer_uid == uid && session->detach_key == key) {
|
|
PERFETTO_DCHECK(session->consumer_maybe_null == nullptr);
|
|
return session;
|
|
}
|
|
}
|
|
return nullptr;
|
|
}
|
|
|
|
TracingServiceImpl::TracingSession* TracingServiceImpl::GetTracingSession(
|
|
TracingSessionID tsid) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto it = tsid ? tracing_sessions_.find(tsid) : tracing_sessions_.end();
|
|
if (it == tracing_sessions_.end())
|
|
return nullptr;
|
|
return &it->second;
|
|
}
|
|
|
|
ProducerID TracingServiceImpl::GetNextProducerID() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_CHECK(producers_.size() < kMaxProducerID);
|
|
do {
|
|
++last_producer_id_;
|
|
} while (producers_.count(last_producer_id_) || last_producer_id_ == 0);
|
|
PERFETTO_DCHECK(last_producer_id_ > 0 && last_producer_id_ <= kMaxProducerID);
|
|
return last_producer_id_;
|
|
}
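
// Illustrative sketch (editor's addition, not part of the Perfetto sources):
// the id-allocation loop above: keep incrementing a counter, skipping 0 and
// ids that are still in use, relying on the caller to guarantee that at least
// one id is free (the PERFETTO_CHECK above). Names are hypothetical; <set>
// and <cstdint> are assumed.
namespace id_alloc_sketch {

inline uint16_t NextFreeId(uint16_t last_id, const std::set<uint16_t>& in_use) {
  uint16_t id = last_id;
  do {
    ++id;  // Unsigned wrap-around is well defined; 0 is never handed out.
  } while (id == 0 || in_use.count(id));
  return id;
}

}  // namespace id_alloc_sketch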
|
|
|
|
TraceBuffer* TracingServiceImpl::GetBufferByID(BufferID buffer_id) {
|
|
auto buf_iter = buffers_.find(buffer_id);
|
|
if (buf_iter == buffers_.end())
|
|
return nullptr;
|
|
return &*buf_iter->second;
|
|
}
|
|
|
|
void TracingServiceImpl::OnStartTriggersTimeout(TracingSessionID tsid) {
|
|
  // Skip the flush entirely if the trace session doesn't exist anymore.
  // This is to prevent misleading error messages from being logged.
|
|
//
|
|
  // If the trace has started from a trigger, we rely on the trigger's
  // |stop_delay_ms|, so we don't flush and disable if we've moved beyond
  // the CONFIGURED state.
|
|
auto* tracing_session_ptr = GetTracingSession(tsid);
|
|
if (tracing_session_ptr &&
|
|
tracing_session_ptr->state == TracingSession::CONFIGURED) {
|
|
PERFETTO_DLOG("Disabling TracingSession %" PRIu64
|
|
" since no triggers activated.",
|
|
tsid);
|
|
// No data should be returned from ReadBuffers() regardless of if we
|
|
// call FreeBuffers() or DisableTracing(). This is because in
|
|
// STOP_TRACING we need this promise in either case, and using
|
|
// DisableTracing() allows a graceful shutdown. Consumers can follow
|
|
// their normal path and check the buffers through ReadBuffers() and
|
|
// the code won't hang because the tracing session will still be
|
|
// alive just disabled.
|
|
DisableTracing(tsid);
|
|
}
|
|
}
|
|
|
|
void TracingServiceImpl::UpdateMemoryGuardrail() {
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
|
|
uint64_t total_buffer_bytes = 0;
|
|
|
|
// Sum up all the shared memory buffers.
|
|
for (const auto& id_to_producer : producers_) {
|
|
if (id_to_producer.second->shared_memory())
|
|
total_buffer_bytes += id_to_producer.second->shared_memory()->size();
|
|
}
|
|
|
|
// Sum up all the trace buffers.
|
|
for (const auto& id_to_buffer : buffers_) {
|
|
total_buffer_bytes += id_to_buffer.second->size();
|
|
}
|
|
|
|
// Set the guard rail to 32MB + the sum of all the buffers over a 30 second
|
|
// interval.
|
|
uint64_t guardrail = base::kWatchdogDefaultMemorySlack + total_buffer_bytes;
|
|
base::Watchdog::GetInstance()->SetMemoryLimit(guardrail, 30 * 1000);
|
|
#endif
|
|
}
|
|
|
|
void TracingServiceImpl::PeriodicSnapshotTask(TracingSession* tracing_session) {
|
|
tracing_session->should_emit_sync_marker = true;
|
|
tracing_session->should_emit_stats = true;
|
|
MaybeSnapshotClocksIntoRingBuffer(tracing_session);
|
|
|
|
uint32_t interval_ms =
|
|
tracing_session->config.builtin_data_sources().snapshot_interval_ms();
|
|
if (!interval_ms)
|
|
interval_ms = kDefaultSnapshotsIntervalMs;
|
|
|
|
TracingSessionID tsid = tracing_session->id;
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostDelayedTask(
|
|
[weak_this, tsid] {
|
|
if (!weak_this)
|
|
return;
|
|
auto* tracing_session_ptr = weak_this->GetTracingSession(tsid);
|
|
if (!tracing_session_ptr)
|
|
return;
|
|
if (tracing_session_ptr->state != TracingSession::STARTED)
|
|
return;
|
|
weak_this->PeriodicSnapshotTask(tracing_session_ptr);
|
|
},
|
|
interval_ms - (base::GetWallTimeMs().count() % interval_ms));
|
|
}
|
|
|
|
void TracingServiceImpl::SnapshotLifecyleEvent(TracingSession* tracing_session,
|
|
uint32_t field_id,
|
|
bool snapshot_clocks) {
|
|
// field_id should be an id of a field in TracingServiceEvent.
|
|
auto& lifecycle_events = tracing_session->lifecycle_events;
|
|
auto event_it =
|
|
std::find_if(lifecycle_events.begin(), lifecycle_events.end(),
|
|
[field_id](const TracingSession::LifecycleEvent& event) {
|
|
return event.field_id == field_id;
|
|
});
|
|
|
|
TracingSession::LifecycleEvent* event;
|
|
if (event_it == lifecycle_events.end()) {
|
|
lifecycle_events.emplace_back(field_id);
|
|
event = &lifecycle_events.back();
|
|
} else {
|
|
event = &*event_it;
|
|
}
|
|
|
|
// Snapshot the clocks before capturing the timestamp for the event so we can
|
|
// use this snapshot to resolve the event timestamp if necessary.
|
|
if (snapshot_clocks)
|
|
MaybeSnapshotClocksIntoRingBuffer(tracing_session);
|
|
|
|
  // Erase before emplacing to prevent an unnecessary doubling of memory if
|
|
// not needed.
|
|
if (event->timestamps.size() >= event->max_size) {
|
|
event->timestamps.erase_front(1 + event->timestamps.size() -
|
|
event->max_size);
|
|
}
|
|
event->timestamps.emplace_back(base::GetBootTimeNs().count());
|
|
}
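
// Illustrative sketch (editor's addition, not part of the Perfetto sources):
// the "erase from the front before emplacing" pattern used above to keep the
// per-event timestamp buffer bounded at |max_size| entries. Shown here with
// std::deque instead of the internal ring-buffer type; names are hypothetical
// and <deque> plus <cstdint> are assumed.
namespace bounded_buffer_sketch {

inline void PushBounded(std::deque<int64_t>* buf, size_t max_size, int64_t v) {
  if (max_size == 0)
    return;  // Nothing can be retained.
  // Drop enough old entries that adding one more stays within |max_size|.
  while (buf->size() + 1 > max_size)
    buf->pop_front();
  buf->push_back(v);
}

}  // namespace bounded_buffer_sketch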
|
|
|
|
void TracingServiceImpl::MaybeSnapshotClocksIntoRingBuffer(
|
|
TracingSession* tracing_session) {
|
|
if (tracing_session->config.builtin_data_sources()
|
|
.disable_clock_snapshotting()) {
|
|
return;
|
|
}
|
|
|
|
// We are making an explicit copy of the latest snapshot (if it exists)
|
|
// because SnapshotClocks reads this data and computes the drift based on its
|
|
// content. If the clock drift is high enough, it will update the contents of
|
|
// |snapshot| and return true. Otherwise, it will return false.
|
|
TracingSession::ClockSnapshotData snapshot =
|
|
tracing_session->clock_snapshot_ring_buffer.empty()
|
|
? TracingSession::ClockSnapshotData()
|
|
: tracing_session->clock_snapshot_ring_buffer.back();
|
|
bool did_update = SnapshotClocks(&snapshot);
|
|
if (did_update) {
|
|
// This means clocks drifted enough since last snapshot. See the comment
|
|
// in SnapshotClocks.
|
|
auto* snapshot_buffer = &tracing_session->clock_snapshot_ring_buffer;
|
|
|
|
    // Erase before emplacing to prevent an unnecessary doubling of memory if
|
|
// not needed.
|
|
static constexpr uint32_t kClockSnapshotRingBufferSize = 16;
|
|
if (snapshot_buffer->size() >= kClockSnapshotRingBufferSize) {
|
|
snapshot_buffer->erase_front(1 + snapshot_buffer->size() -
|
|
kClockSnapshotRingBufferSize);
|
|
}
|
|
snapshot_buffer->emplace_back(std::move(snapshot));
|
|
}
|
|
}
|
|
|
|
// Returns true when the data in |snapshot_data| is updated with the new state
|
|
// of the clocks and false otherwise.
|
|
bool TracingServiceImpl::SnapshotClocks(
|
|
TracingSession::ClockSnapshotData* snapshot_data) {
|
|
// Minimum drift that justifies replacing a prior clock snapshot that hasn't
|
|
// been emitted into the trace yet (see comment below).
|
|
static constexpr int64_t kSignificantDriftNs = 10 * 1000 * 1000; // 10 ms
|
|
|
|
TracingSession::ClockSnapshotData new_snapshot_data;
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) && \
|
|
!PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
|
|
!PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
|
|
struct {
|
|
clockid_t id;
|
|
protos::pbzero::BuiltinClock type;
|
|
struct timespec ts;
|
|
} clocks[] = {
|
|
{CLOCK_BOOTTIME, protos::pbzero::BUILTIN_CLOCK_BOOTTIME, {0, 0}},
|
|
{CLOCK_REALTIME_COARSE,
|
|
protos::pbzero::BUILTIN_CLOCK_REALTIME_COARSE,
|
|
{0, 0}},
|
|
{CLOCK_MONOTONIC_COARSE,
|
|
protos::pbzero::BUILTIN_CLOCK_MONOTONIC_COARSE,
|
|
{0, 0}},
|
|
{CLOCK_REALTIME, protos::pbzero::BUILTIN_CLOCK_REALTIME, {0, 0}},
|
|
{CLOCK_MONOTONIC, protos::pbzero::BUILTIN_CLOCK_MONOTONIC, {0, 0}},
|
|
{CLOCK_MONOTONIC_RAW,
|
|
protos::pbzero::BUILTIN_CLOCK_MONOTONIC_RAW,
|
|
{0, 0}},
|
|
};
|
|
// First snapshot all the clocks as atomically as we can.
|
|
for (auto& clock : clocks) {
|
|
if (clock_gettime(clock.id, &clock.ts) == -1)
|
|
PERFETTO_DLOG("clock_gettime failed for clock %d", clock.id);
|
|
}
|
|
for (auto& clock : clocks) {
|
|
new_snapshot_data.push_back(std::make_pair(
|
|
static_cast<uint32_t>(clock.type),
|
|
static_cast<uint64_t>(base::FromPosixTimespec(clock.ts).count())));
|
|
}
|
|
#else // !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) &&
|
|
// !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) &&
|
|
// !PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
|
|
auto wall_time_ns = static_cast<uint64_t>(base::GetWallTimeNs().count());
|
|
// The default trace clock is boot time, so we always need to emit a path to
|
|
// it. However since we don't actually have a boot time source on these
|
|
// platforms, pretend that wall time equals boot time.
|
|
new_snapshot_data.push_back(
|
|
std::make_pair(protos::pbzero::BUILTIN_CLOCK_BOOTTIME, wall_time_ns));
|
|
new_snapshot_data.push_back(
|
|
std::make_pair(protos::pbzero::BUILTIN_CLOCK_MONOTONIC, wall_time_ns));
|
|
#endif // !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) &&
|
|
// !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) &&
|
|
// !PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
|
|
|
|
// If we're about to update a session's latest clock snapshot that hasn't been
|
|
// emitted into the trace yet, check whether the clocks have drifted enough to
|
|
// warrant overriding the current snapshot values. The older snapshot would be
|
|
// valid for a larger part of the currently buffered trace data because the
|
|
// clock sync protocol in trace processor uses the latest clock <= timestamp
|
|
// to translate times (see https://perfetto.dev/docs/concepts/clock-sync), so
|
|
// we try to keep it if we can.
|
|
if (!snapshot_data->empty()) {
|
|
PERFETTO_DCHECK(snapshot_data->size() == new_snapshot_data.size());
|
|
PERFETTO_DCHECK((*snapshot_data)[0].first ==
|
|
protos::gen::BUILTIN_CLOCK_BOOTTIME);
|
|
|
|
bool update_snapshot = false;
|
|
uint64_t old_boot_ns = (*snapshot_data)[0].second;
|
|
uint64_t new_boot_ns = new_snapshot_data[0].second;
|
|
int64_t boot_diff =
|
|
static_cast<int64_t>(new_boot_ns) - static_cast<int64_t>(old_boot_ns);
|
|
|
|
for (size_t i = 1; i < snapshot_data->size(); i++) {
|
|
uint64_t old_ns = (*snapshot_data)[i].second;
|
|
uint64_t new_ns = new_snapshot_data[i].second;
|
|
|
|
int64_t diff =
|
|
static_cast<int64_t>(new_ns) - static_cast<int64_t>(old_ns);
|
|
|
|
// Compare the boottime delta against the delta of this clock.
|
|
if (std::abs(boot_diff - diff) >= kSignificantDriftNs) {
|
|
update_snapshot = true;
|
|
break;
|
|
}
|
|
}
|
|
if (!update_snapshot)
|
|
return false;
|
|
snapshot_data->clear();
|
|
}
|
|
|
|
*snapshot_data = std::move(new_snapshot_data);
|
|
return true;
|
|
}
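
// Illustrative sketch (editor's addition, not part of the Perfetto sources):
// the drift test used above: a new snapshot replaces the previous one only if
// some clock's delta diverges from the boottime delta by at least a
// threshold. Pure arithmetic on already-captured readings; names are
// hypothetical and <cstdint>, <cstdlib> and <vector> are assumed.
namespace clock_drift_sketch {

// |old_ns| and |new_ns| hold one reading per clock, index 0 being boottime.
inline bool DriftedSignificantly(const std::vector<uint64_t>& old_ns,
                                 const std::vector<uint64_t>& new_ns,
                                 int64_t threshold_ns) {
  if (old_ns.empty() || new_ns.empty())
    return true;  // No previous snapshot: always take the new one.
  int64_t boot_diff =
      static_cast<int64_t>(new_ns[0]) - static_cast<int64_t>(old_ns[0]);
  for (size_t i = 1; i < old_ns.size() && i < new_ns.size(); i++) {
    int64_t diff =
        static_cast<int64_t>(new_ns[i]) - static_cast<int64_t>(old_ns[i]);
    if (std::abs(boot_diff - diff) >= threshold_ns)
      return true;  // This clock drifted relative to boottime.
  }
  return false;
}

}  // namespace clock_drift_sketch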
|
|
|
|
void TracingServiceImpl::EmitClockSnapshot(
|
|
TracingSession* tracing_session,
|
|
TracingSession::ClockSnapshotData snapshot_data,
|
|
std::vector<TracePacket>* packets) {
|
|
PERFETTO_DCHECK(!tracing_session->config.builtin_data_sources()
|
|
.disable_clock_snapshotting());
|
|
|
|
protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
|
|
auto* snapshot = packet->set_clock_snapshot();
|
|
|
|
protos::gen::BuiltinClock trace_clock =
|
|
tracing_session->config.builtin_data_sources().primary_trace_clock();
|
|
if (!trace_clock)
|
|
trace_clock = protos::gen::BUILTIN_CLOCK_BOOTTIME;
|
|
snapshot->set_primary_trace_clock(
|
|
static_cast<protos::pbzero::BuiltinClock>(trace_clock));
|
|
|
|
for (auto& clock_id_and_ts : snapshot_data) {
|
|
auto* c = snapshot->add_clocks();
|
|
c->set_clock_id(clock_id_and_ts.first);
|
|
c->set_timestamp(clock_id_and_ts.second);
|
|
}
|
|
|
|
packet->set_trusted_uid(static_cast<int32_t>(uid_));
|
|
packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
|
|
SerializeAndAppendPacket(packets, packet.SerializeAsArray());
|
|
}
|
|
|
|
void TracingServiceImpl::EmitSyncMarker(std::vector<TracePacket>* packets) {
|
|
// The sync marks are used to tokenize large traces efficiently.
|
|
// See description in trace_packet.proto.
|
|
if (sync_marker_packet_size_ == 0) {
|
|
// The marker ABI expects that the marker is written after the uid.
|
|
// Protozero guarantees that fields are written in the same order of the
|
|
// calls. The ResynchronizeTraceStreamUsingSyncMarker test verifies the ABI.
|
|
protozero::StaticBuffered<protos::pbzero::TracePacket> packet(
|
|
&sync_marker_packet_[0], sizeof(sync_marker_packet_));
|
|
packet->set_trusted_uid(static_cast<int32_t>(uid_));
|
|
packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
|
|
|
|
// Keep this last.
|
|
packet->set_synchronization_marker(kSyncMarker, sizeof(kSyncMarker));
|
|
sync_marker_packet_size_ = packet.Finalize();
|
|
}
|
|
packets->emplace_back();
|
|
packets->back().AddSlice(&sync_marker_packet_[0], sync_marker_packet_size_);
|
|
}
|
|
|
|
void TracingServiceImpl::EmitStats(TracingSession* tracing_session,
|
|
std::vector<TracePacket>* packets) {
|
|
protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
|
|
packet->set_trusted_uid(static_cast<int32_t>(uid_));
|
|
packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
|
|
GetTraceStats(tracing_session).Serialize(packet->set_trace_stats());
|
|
SerializeAndAppendPacket(packets, packet.SerializeAsArray());
|
|
}
|
|
|
|
TraceStats TracingServiceImpl::GetTraceStats(TracingSession* tracing_session) {
|
|
TraceStats trace_stats;
|
|
trace_stats.set_producers_connected(static_cast<uint32_t>(producers_.size()));
|
|
trace_stats.set_producers_seen(last_producer_id_);
|
|
trace_stats.set_data_sources_registered(
|
|
static_cast<uint32_t>(data_sources_.size()));
|
|
trace_stats.set_data_sources_seen(last_data_source_instance_id_);
|
|
trace_stats.set_tracing_sessions(
|
|
static_cast<uint32_t>(tracing_sessions_.size()));
|
|
trace_stats.set_total_buffers(static_cast<uint32_t>(buffers_.size()));
|
|
trace_stats.set_chunks_discarded(chunks_discarded_);
|
|
trace_stats.set_patches_discarded(patches_discarded_);
|
|
trace_stats.set_invalid_packets(tracing_session->invalid_packets);
|
|
|
|
for (BufferID buf_id : tracing_session->buffers_index) {
|
|
TraceBuffer* buf = GetBufferByID(buf_id);
|
|
if (!buf) {
|
|
PERFETTO_DFATAL("Buffer not found.");
|
|
continue;
|
|
}
|
|
*trace_stats.add_buffer_stats() = buf->stats();
|
|
} // for (buf in session).
|
|
return trace_stats;
|
|
}
|
|
|
|
void TracingServiceImpl::MaybeEmitTraceConfig(
|
|
TracingSession* tracing_session,
|
|
std::vector<TracePacket>* packets) {
|
|
if (tracing_session->did_emit_config)
|
|
return;
|
|
tracing_session->did_emit_config = true;
|
|
protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
|
|
packet->set_trusted_uid(static_cast<int32_t>(uid_));
|
|
packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
|
|
tracing_session->config.Serialize(packet->set_trace_config());
|
|
SerializeAndAppendPacket(packets, packet.SerializeAsArray());
|
|
}
|
|
|
|
void TracingServiceImpl::MaybeEmitSystemInfo(
|
|
TracingSession* tracing_session,
|
|
std::vector<TracePacket>* packets) {
|
|
if (tracing_session->did_emit_system_info)
|
|
return;
|
|
tracing_session->did_emit_system_info = true;
|
|
protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
|
|
auto* info = packet->set_system_info();
|
|
base::ignore_result(info); // For PERFETTO_OS_WIN.
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
|
|
!PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
|
|
struct utsname uname_info;
|
|
if (uname(&uname_info) == 0) {
|
|
auto* utsname_info = info->set_utsname();
|
|
utsname_info->set_sysname(uname_info.sysname);
|
|
utsname_info->set_version(uname_info.version);
|
|
utsname_info->set_machine(uname_info.machine);
|
|
utsname_info->set_release(uname_info.release);
|
|
}
|
|
#endif // !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
char value[PROP_VALUE_MAX];
|
|
if (__system_property_get("ro.build.fingerprint", value)) {
|
|
info->set_android_build_fingerprint(value);
|
|
} else {
|
|
PERFETTO_ELOG("Unable to read ro.build.fingerprint");
|
|
}
|
|
info->set_hz(sysconf(_SC_CLK_TCK));
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
packet->set_trusted_uid(static_cast<int32_t>(uid_));
|
|
packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
|
|
SerializeAndAppendPacket(packets, packet.SerializeAsArray());
|
|
}
|
|
|
|
void TracingServiceImpl::EmitLifecycleEvents(
|
|
TracingSession* tracing_session,
|
|
std::vector<TracePacket>* packets) {
|
|
using TimestampedPacket =
|
|
std::pair<int64_t /* ts */, std::vector<uint8_t> /* serialized packet */>;
|
|
|
|
std::vector<TimestampedPacket> timestamped_packets;
|
|
for (auto& event : tracing_session->lifecycle_events) {
|
|
for (int64_t ts : event.timestamps) {
|
|
protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
|
|
packet->set_timestamp(static_cast<uint64_t>(ts));
|
|
packet->set_trusted_uid(static_cast<int32_t>(uid_));
|
|
packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
|
|
|
|
auto* service_event = packet->set_service_event();
|
|
service_event->AppendVarInt(event.field_id, 1);
|
|
timestamped_packets.emplace_back(ts, packet.SerializeAsArray());
|
|
}
|
|
event.timestamps.clear();
|
|
}
|
|
|
|
// We sort by timestamp here to ensure that the "sequence" of lifecycle
|
|
// packets has monotonic timestamps like other sequences in the trace.
|
|
// Note that these events could still be out of order with respect to other
|
|
// events on the service packet sequence (e.g. trigger received packets).
|
|
std::sort(timestamped_packets.begin(), timestamped_packets.end(),
|
|
[](const TimestampedPacket& a, const TimestampedPacket& b) {
|
|
return a.first < b.first;
|
|
});
|
|
|
|
for (const auto& pair : timestamped_packets)
|
|
SerializeAndAppendPacket(packets, std::move(pair.second));
|
|
}
|
|
|
|
void TracingServiceImpl::MaybeEmitReceivedTriggers(
|
|
TracingSession* tracing_session,
|
|
std::vector<TracePacket>* packets) {
|
|
PERFETTO_DCHECK(tracing_session->num_triggers_emitted_into_trace <=
|
|
tracing_session->received_triggers.size());
|
|
for (size_t i = tracing_session->num_triggers_emitted_into_trace;
|
|
i < tracing_session->received_triggers.size(); ++i) {
|
|
const auto& info = tracing_session->received_triggers[i];
|
|
protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
|
|
auto* trigger = packet->set_trigger();
|
|
trigger->set_trigger_name(info.trigger_name);
|
|
trigger->set_producer_name(info.producer_name);
|
|
trigger->set_trusted_producer_uid(static_cast<int32_t>(info.producer_uid));
|
|
|
|
packet->set_timestamp(info.boot_time_ns);
|
|
packet->set_trusted_uid(static_cast<int32_t>(uid_));
|
|
packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
|
|
SerializeAndAppendPacket(packets, packet.SerializeAsArray());
|
|
++tracing_session->num_triggers_emitted_into_trace;
|
|
}
|
|
}
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
// TracingServiceImpl::ConsumerEndpointImpl implementation
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
TracingServiceImpl::ConsumerEndpointImpl::ConsumerEndpointImpl(
|
|
TracingServiceImpl* service,
|
|
base::TaskRunner* task_runner,
|
|
Consumer* consumer,
|
|
uid_t uid)
|
|
: task_runner_(task_runner),
|
|
service_(service),
|
|
consumer_(consumer),
|
|
uid_(uid),
|
|
weak_ptr_factory_(this) {}
|
|
|
|
TracingServiceImpl::ConsumerEndpointImpl::~ConsumerEndpointImpl() {
|
|
service_->DisconnectConsumer(this);
|
|
consumer_->OnDisconnect();
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::NotifyOnTracingDisabled() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto weak_this = GetWeakPtr();
|
|
task_runner_->PostTask([weak_this] {
|
|
if (weak_this)
|
|
weak_this->consumer_->OnTracingDisabled();
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::EnableTracing(
|
|
const TraceConfig& cfg,
|
|
base::ScopedFile fd) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!service_->EnableTracing(this, cfg, std::move(fd)))
|
|
NotifyOnTracingDisabled();
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::ChangeTraceConfig(
|
|
const TraceConfig& cfg) {
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG(
|
|
"Consumer called ChangeTraceConfig() but tracing was "
|
|
"not active");
|
|
return;
|
|
}
|
|
service_->ChangeTraceConfig(this, cfg);
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::StartTracing() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG("Consumer called StartTracing() but tracing was not active");
|
|
return;
|
|
}
|
|
service_->StartTracing(tracing_session_id_);
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::DisableTracing() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG("Consumer called DisableTracing() but tracing was not active");
|
|
return;
|
|
}
|
|
service_->DisableTracing(tracing_session_id_);
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::ReadBuffers() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG("Consumer called ReadBuffers() but tracing was not active");
|
|
consumer_->OnTraceData({}, /* has_more = */ false);
|
|
return;
|
|
}
|
|
if (!service_->ReadBuffers(tracing_session_id_, this)) {
|
|
consumer_->OnTraceData({}, /* has_more = */ false);
|
|
}
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::FreeBuffers() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG("Consumer called FreeBuffers() but tracing was not active");
|
|
return;
|
|
}
|
|
service_->FreeBuffers(tracing_session_id_);
|
|
tracing_session_id_ = 0;
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::Flush(uint32_t timeout_ms,
|
|
FlushCallback callback) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG("Consumer called Flush() but tracing was not active");
|
|
return;
|
|
}
|
|
service_->Flush(tracing_session_id_, timeout_ms, callback);
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::Detach(const std::string& key) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
bool success = service_->DetachConsumer(this, key);
|
|
auto weak_this = GetWeakPtr();
|
|
task_runner_->PostTask([weak_this, success] {
|
|
if (weak_this)
|
|
weak_this->consumer_->OnDetach(success);
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::Attach(const std::string& key) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
bool success = service_->AttachConsumer(this, key);
|
|
auto weak_this = GetWeakPtr();
|
|
task_runner_->PostTask([weak_this, success] {
|
|
if (!weak_this)
|
|
return;
|
|
Consumer* consumer = weak_this->consumer_;
|
|
TracingSession* session =
|
|
weak_this->service_->GetTracingSession(weak_this->tracing_session_id_);
|
|
if (!session) {
|
|
consumer->OnAttach(false, TraceConfig());
|
|
return;
|
|
}
|
|
consumer->OnAttach(success, session->config);
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::GetTraceStats() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
bool success = false;
|
|
TraceStats stats;
|
|
TracingSession* session = service_->GetTracingSession(tracing_session_id_);
|
|
if (session) {
|
|
success = true;
|
|
stats = service_->GetTraceStats(session);
|
|
}
|
|
auto weak_this = GetWeakPtr();
|
|
task_runner_->PostTask([weak_this, success, stats] {
|
|
if (weak_this)
|
|
weak_this->consumer_->OnTraceStats(success, stats);
|
|
});
|
|
}
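
// Records which event types this consumer wants to observe. For state that
// may already exist (running data source instances, the all-data-sources
// started notification) the current state is replayed immediately.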
void TracingServiceImpl::ConsumerEndpointImpl::ObserveEvents(
    uint32_t events_mask) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  observable_events_mask_ = events_mask;
  TracingSession* session = service_->GetTracingSession(tracing_session_id_);
  if (!session)
    return;

  if (observable_events_mask_ & ObservableEvents::TYPE_DATA_SOURCES_INSTANCES) {
    // Issue initial states.
    for (const auto& kv : session->data_source_instances) {
      ProducerEndpointImpl* producer = service_->GetProducer(kv.first);
      PERFETTO_DCHECK(producer);
      OnDataSourceInstanceStateChange(*producer, kv.second);
    }
  }

  // If the ObserveEvents() call happens after data sources have acked already,
  // notify immediately.
  if (observable_events_mask_ &
      ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED) {
    service_->MaybeNotifyAllDataSourcesStarted(session);
  }
}

void TracingServiceImpl::ConsumerEndpointImpl::OnDataSourceInstanceStateChange(
|
|
const ProducerEndpointImpl& producer,
|
|
const DataSourceInstance& instance) {
|
|
if (!(observable_events_mask_ &
|
|
ObservableEvents::TYPE_DATA_SOURCES_INSTANCES)) {
|
|
return;
|
|
}
|
|
|
|
if (instance.state != DataSourceInstance::CONFIGURED &&
|
|
instance.state != DataSourceInstance::STARTED &&
|
|
instance.state != DataSourceInstance::STOPPED) {
|
|
return;
|
|
}
|
|
|
|
auto* observable_events = AddObservableEvents();
|
|
auto* change = observable_events->add_instance_state_changes();
|
|
change->set_producer_name(producer.name_);
|
|
change->set_data_source_name(instance.data_source_name);
|
|
if (instance.state == DataSourceInstance::STARTED) {
|
|
change->set_state(ObservableEvents::DATA_SOURCE_INSTANCE_STATE_STARTED);
|
|
} else {
|
|
change->set_state(ObservableEvents::DATA_SOURCE_INSTANCE_STATE_STOPPED);
|
|
}
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::OnAllDataSourcesStarted() {
|
|
if (!(observable_events_mask_ &
|
|
ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED)) {
|
|
return;
|
|
}
|
|
auto* observable_events = AddObservableEvents();
|
|
observable_events->set_all_data_sources_started(true);
|
|
}
|
|
|
|
base::WeakPtr<TracingServiceImpl::ConsumerEndpointImpl>
|
|
TracingServiceImpl::ConsumerEndpointImpl::GetWeakPtr() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
return weak_ptr_factory_.GetWeakPtr();
|
|
}
|
|
|
|
ObservableEvents*
|
|
TracingServiceImpl::ConsumerEndpointImpl::AddObservableEvents() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!observable_events_) {
|
|
observable_events_.reset(new ObservableEvents());
|
|
auto weak_this = GetWeakPtr();
|
|
task_runner_->PostTask([weak_this] {
|
|
if (!weak_this)
|
|
return;
|
|
|
|
// Move into a temporary to allow reentrancy in OnObservableEvents.
|
|
auto observable_events = std::move(weak_this->observable_events_);
|
|
weak_this->consumer_->OnObservableEvents(*observable_events);
|
|
});
|
|
}
|
|
return observable_events_.get();
|
|
}
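
// Builds a TracingServiceState snapshot (session counts, connected producers,
// registered data sources) and passes it to the callback synchronously.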
void TracingServiceImpl::ConsumerEndpointImpl::QueryServiceState(
    QueryServiceStateCallback callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingServiceState svc_state;

  const auto& sessions = service_->tracing_sessions_;
  svc_state.set_num_sessions(static_cast<int>(sessions.size()));

  int num_started = 0;
  for (const auto& kv : sessions)
    num_started += kv.second.state == TracingSession::State::STARTED ? 1 : 0;
  svc_state.set_num_sessions_started(static_cast<int>(num_started));

  for (const auto& kv : service_->producers_) {
    auto* producer = svc_state.add_producers();
    producer->set_id(static_cast<int>(kv.first));
    producer->set_name(kv.second->name_);
    producer->set_uid(static_cast<int32_t>(kv.second->uid_));
  }

  for (const auto& kv : service_->data_sources_) {
    const auto& registered_data_source = kv.second;
    auto* data_source = svc_state.add_data_sources();
    *data_source->mutable_ds_descriptor() = registered_data_source.descriptor;
    data_source->set_producer_id(
        static_cast<int>(registered_data_source.producer_id));
  }
  callback(/*success=*/true, svc_state);
}

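// Advertises the optional features supported by this service build. The
// static_assert keeps the observable-events list in sync with
// ObservableEvents::Type.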
void TracingServiceImpl::ConsumerEndpointImpl::QueryCapabilities(
    QueryCapabilitiesCallback callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingServiceCapabilities caps;
  caps.set_has_query_capabilities(true);
  caps.set_has_trace_config_output_path(true);
  caps.add_observable_events(ObservableEvents::TYPE_DATA_SOURCES_INSTANCES);
  caps.add_observable_events(ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED);
  static_assert(ObservableEvents::Type_MAX ==
                    ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED,
                "");
  callback(caps);
}

////////////////////////////////////////////////////////////////////////////////
|
|
// TracingServiceImpl::ProducerEndpointImpl implementation
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
TracingServiceImpl::ProducerEndpointImpl::ProducerEndpointImpl(
|
|
ProducerID id,
|
|
uid_t uid,
|
|
TracingServiceImpl* service,
|
|
base::TaskRunner* task_runner,
|
|
Producer* producer,
|
|
const std::string& producer_name,
|
|
bool in_process,
|
|
bool smb_scraping_enabled)
|
|
: id_(id),
|
|
uid_(uid),
|
|
service_(service),
|
|
task_runner_(task_runner),
|
|
producer_(producer),
|
|
name_(producer_name),
|
|
in_process_(in_process),
|
|
smb_scraping_enabled_(smb_scraping_enabled),
|
|
weak_ptr_factory_(this) {}
|
|
|
|
TracingServiceImpl::ProducerEndpointImpl::~ProducerEndpointImpl() {
|
|
service_->DisconnectProducer(id_);
|
|
producer_->OnDisconnect();
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::RegisterDataSource(
|
|
const DataSourceDescriptor& desc) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (desc.name().empty()) {
|
|
PERFETTO_DLOG("Received RegisterDataSource() with empty name");
|
|
return;
|
|
}
|
|
|
|
service_->RegisterDataSource(id_, desc);
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::UnregisterDataSource(
|
|
const std::string& name) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
service_->UnregisterDataSource(id_, name);
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::RegisterTraceWriter(
|
|
uint32_t writer_id,
|
|
uint32_t target_buffer) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
writers_[static_cast<WriterID>(writer_id)] =
|
|
static_cast<BufferID>(target_buffer);
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::UnregisterTraceWriter(
|
|
uint32_t writer_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
writers_.erase(static_cast<WriterID>(writer_id));
|
|
}
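
// Handles a producer's CommitDataRequest: each completed chunk listed in
// chunks_to_move() is acquired from the shared memory ABI, copied into its
// target trace buffer and released back as free. All producer-supplied data
// is treated as untrusted; malformed entries are skipped.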
void TracingServiceImpl::ProducerEndpointImpl::CommitData(
    const CommitDataRequest& req_untrusted,
    CommitDataCallback callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  if (metatrace::IsEnabled(metatrace::TAG_TRACE_SERVICE)) {
    PERFETTO_METATRACE_COUNTER(TAG_TRACE_SERVICE, TRACE_SERVICE_COMMIT_DATA,
                               EncodeCommitDataRequest(id_, req_untrusted));
  }

  if (!shared_memory_) {
    PERFETTO_DLOG(
        "Attempted to commit data before the shared memory was allocated.");
    return;
  }
  PERFETTO_DCHECK(shmem_abi_.is_valid());
  for (const auto& entry : req_untrusted.chunks_to_move()) {
    const uint32_t page_idx = entry.page();
    if (page_idx >= shmem_abi_.num_pages())
      continue;  // A buggy or malicious producer.

    SharedMemoryABI::Chunk chunk =
        shmem_abi_.TryAcquireChunkForReading(page_idx, entry.chunk());
    if (!chunk.is_valid()) {
      PERFETTO_DLOG("Asked to move chunk %d:%d, but it's not complete",
                    entry.page(), entry.chunk());
      continue;
    }

    // TryAcquireChunkForReading() has load-acquire semantics. Once acquired,
    // the ABI contract expects the producer to not touch the chunk anymore
    // (until the service marks that as free). This is why all the reads below
    // are just memory_order_relaxed. Also, the code here assumes that all this
    // data can be malicious and just gives up if anything is malformed.
    BufferID buffer_id = static_cast<BufferID>(entry.target_buffer());
    const SharedMemoryABI::ChunkHeader& chunk_header = *chunk.header();
    WriterID writer_id = chunk_header.writer_id.load(std::memory_order_relaxed);
    ChunkID chunk_id = chunk_header.chunk_id.load(std::memory_order_relaxed);
    auto packets = chunk_header.packets.load(std::memory_order_relaxed);
    uint16_t num_fragments = packets.count;
    uint8_t chunk_flags = packets.flags;

    service_->CopyProducerPageIntoLogBuffer(
        id_, uid_, writer_id, chunk_id, buffer_id, num_fragments, chunk_flags,
        /*chunk_complete=*/true, chunk.payload_begin(), chunk.payload_size());

    // This one has release-store semantics.
    shmem_abi_.ReleaseChunkAsFree(std::move(chunk));
  }  // for(chunks_to_move)

  service_->ApplyChunkPatches(id_, req_untrusted.chunks_to_patch());

  if (req_untrusted.flush_request_id()) {
    service_->NotifyFlushDoneForProducer(id_, req_untrusted.flush_request_id());
  }

  // Keep this invocation last. ProducerIPCService::CommitData() relies on this
  // callback being invoked within the same callstack and not posted. If this
  // changes, the code there needs to be changed accordingly.
  if (callback)
    callback();
}

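// Installs the shared memory buffer for this producer and initializes the
// SharedMemoryABI view over it. For in-process producers this also creates
// the local SharedMemoryArbiterImpl used to write directly into the SMB.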
void TracingServiceImpl::ProducerEndpointImpl::SetupSharedMemory(
    std::unique_ptr<SharedMemory> shared_memory,
    size_t page_size_bytes,
    bool provided_by_producer) {
  PERFETTO_DCHECK(!shared_memory_ && !shmem_abi_.is_valid());
  PERFETTO_DCHECK(page_size_bytes % 1024 == 0);

  shared_memory_ = std::move(shared_memory);
  shared_buffer_page_size_kb_ = page_size_bytes / 1024;
  is_shmem_provided_by_producer_ = provided_by_producer;

  shmem_abi_.Initialize(reinterpret_cast<uint8_t*>(shared_memory_->start()),
                        shared_memory_->size(),
                        shared_buffer_page_size_kb() * 1024);
  if (in_process_) {
    inproc_shmem_arbiter_.reset(new SharedMemoryArbiterImpl(
        shared_memory_->start(), shared_memory_->size(),
        shared_buffer_page_size_kb_ * 1024, this, task_runner_));
  }

  OnTracingSetup();
  service_->UpdateMemoryGuardrail();
}

SharedMemory* TracingServiceImpl::ProducerEndpointImpl::shared_memory() const {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
return shared_memory_.get();
|
|
}
|
|
|
|
size_t TracingServiceImpl::ProducerEndpointImpl::shared_buffer_page_size_kb()
|
|
const {
|
|
return shared_buffer_page_size_kb_;
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::ActivateTriggers(
|
|
const std::vector<std::string>& triggers) {
|
|
service_->ActivateTriggers(id_, triggers);
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::StopDataSource(
|
|
DataSourceInstanceID ds_inst_id) {
|
|
// TODO(primiano): When we'll support tearing down the SMB, at this point we
|
|
// should send the Producer a TearDownTracing if all its data sources have
|
|
// been disabled (see b/77532839 and aosp/655179 PS1).
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask([weak_this, ds_inst_id] {
|
|
if (weak_this)
|
|
weak_this->producer_->StopDataSource(ds_inst_id);
|
|
});
|
|
}
|
|
|
|
SharedMemoryArbiter*
|
|
TracingServiceImpl::ProducerEndpointImpl::MaybeSharedMemoryArbiter() {
|
|
if (!inproc_shmem_arbiter_) {
|
|
PERFETTO_FATAL(
|
|
"The in-process SharedMemoryArbiter can only be used when "
|
|
"CreateProducer has been called with in_process=true and after tracing "
|
|
"has started.");
|
|
}
|
|
|
|
PERFETTO_DCHECK(in_process_);
|
|
return inproc_shmem_arbiter_.get();
|
|
}
|
|
|
|
bool TracingServiceImpl::ProducerEndpointImpl::IsShmemProvidedByProducer()
|
|
const {
|
|
return is_shmem_provided_by_producer_;
|
|
}
|
|
|
|
// Can be called on any thread.
|
|
std::unique_ptr<TraceWriter>
|
|
TracingServiceImpl::ProducerEndpointImpl::CreateTraceWriter(
|
|
BufferID buf_id,
|
|
BufferExhaustedPolicy buffer_exhausted_policy) {
|
|
PERFETTO_DCHECK(MaybeSharedMemoryArbiter());
|
|
return MaybeSharedMemoryArbiter()->CreateTraceWriter(buf_id,
|
|
buffer_exhausted_policy);
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::NotifyFlushComplete(
|
|
FlushRequestID id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DCHECK(MaybeSharedMemoryArbiter());
|
|
return MaybeSharedMemoryArbiter()->NotifyFlushComplete(id);
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::OnTracingSetup() {
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask([weak_this] {
|
|
if (weak_this)
|
|
weak_this->producer_->OnTracingSetup();
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::Flush(
|
|
FlushRequestID flush_request_id,
|
|
const std::vector<DataSourceInstanceID>& data_sources) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask([weak_this, flush_request_id, data_sources] {
|
|
if (weak_this) {
|
|
weak_this->producer_->Flush(flush_request_id, data_sources.data(),
|
|
data_sources.size());
|
|
}
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::SetupDataSource(
|
|
DataSourceInstanceID ds_id,
|
|
const DataSourceConfig& config) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
allowed_target_buffers_.insert(static_cast<BufferID>(config.target_buffer()));
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask([weak_this, ds_id, config] {
|
|
if (weak_this)
|
|
weak_this->producer_->SetupDataSource(ds_id, std::move(config));
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::StartDataSource(
|
|
DataSourceInstanceID ds_id,
|
|
const DataSourceConfig& config) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask([weak_this, ds_id, config] {
|
|
if (weak_this)
|
|
weak_this->producer_->StartDataSource(ds_id, std::move(config));
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::NotifyDataSourceStarted(
|
|
DataSourceInstanceID data_source_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
service_->NotifyDataSourceStarted(id_, data_source_id);
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::NotifyDataSourceStopped(
|
|
DataSourceInstanceID data_source_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
service_->NotifyDataSourceStopped(id_, data_source_id);
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::OnFreeBuffers(
|
|
const std::vector<BufferID>& target_buffers) {
|
|
if (allowed_target_buffers_.empty())
|
|
return;
|
|
for (BufferID buffer : target_buffers)
|
|
allowed_target_buffers_.erase(buffer);
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::ClearIncrementalState(
|
|
const std::vector<DataSourceInstanceID>& data_sources) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask([weak_this, data_sources] {
|
|
if (weak_this) {
|
|
weak_this->producer_->ClearIncrementalState(data_sources.data(),
|
|
data_sources.size());
|
|
}
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::Sync(
|
|
std::function<void()> callback) {
|
|
task_runner_->PostTask(callback);
|
|
}
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
// TracingServiceImpl::TracingSession implementation
|
|
////////////////////////////////////////////////////////////////////////////////
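
// A TracingSession tracks a single EnableTracing() request from a consumer,
// holding its TraceConfig and the lifecycle-event history emitted by
// EmitLifecycleEvents() above.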
TracingServiceImpl::TracingSession::TracingSession(
    TracingSessionID session_id,
    ConsumerEndpointImpl* consumer,
    const TraceConfig& new_config)
    : id(session_id),
      consumer_maybe_null(consumer),
      consumer_uid(consumer->uid_),
      config(new_config) {
  // all_data_sources_flushed is special because we store up to 64 events of
  // this type. Other events will go through the default case in
  // SnapshotLifecycleEvent() where they will be given a max history of 1.
  lifecycle_events.emplace_back(
      protos::pbzero::TracingServiceEvent::kAllDataSourcesFlushedFieldNumber,
      64 /* max_size */);
}

} // namespace perfetto

// gen_amalgamated begin source: src/tracing/internal/in_process_tracing_backend.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/in_process_tracing_backend.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
// TODO(primiano): When the in-process backend is used, we should never end up
// in a situation where the thread where the TracingService and Producer live
// writes a packet and hence can get into the GetNewChunk() stall.
// This would happen only if the API client code calls Trace() from one of the
// callbacks it receives (e.g. OnStart(), OnStop()). We should either cause a
// hard crash or ignore traces from that thread if that happens, because it
// will deadlock (the Service will never free up the SMB because it won't ever
// get to run the task).
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
namespace {
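
// "Shared" memory for the in-process backend: service, producer and consumer
// all live in the same process, so a plain heap allocation (base::PagedMemory)
// is enough and no file descriptor is exposed (fd() returns -1).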
class InProcessShm : public SharedMemory {
 public:
  explicit InProcessShm(size_t size);
  ~InProcessShm() override;
  void* start() const override;
  size_t size() const override;
  int fd() const override;

 private:
  base::PagedMemory mem_;
};

class InProcessShmFactory : public SharedMemory::Factory {
|
|
public:
|
|
~InProcessShmFactory() override;
|
|
std::unique_ptr<SharedMemory> CreateSharedMemory(size_t) override;
|
|
};
|
|
|
|
InProcessShm::~InProcessShm() = default;
|
|
|
|
InProcessShm::InProcessShm(size_t size)
|
|
: mem_(base::PagedMemory::Allocate(size)) {}
|
|
|
|
void* InProcessShm::start() const {
|
|
return mem_.Get();
|
|
}
|
|
|
|
size_t InProcessShm::size() const {
|
|
return mem_.size();
|
|
}
|
|
|
|
int InProcessShm::fd() const {
|
|
return -1;
|
|
}
|
|
|
|
InProcessShmFactory::~InProcessShmFactory() = default;
|
|
std::unique_ptr<SharedMemory> InProcessShmFactory::CreateSharedMemory(
|
|
size_t size) {
|
|
return std::unique_ptr<SharedMemory>(new InProcessShm(size));
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// static
|
|
TracingBackend* InProcessTracingBackend::GetInstance() {
|
|
static auto* instance = new InProcessTracingBackend();
|
|
return instance;
|
|
}
|
|
|
|
InProcessTracingBackend::InProcessTracingBackend() {}
|
|
|
|
std::unique_ptr<ProducerEndpoint> InProcessTracingBackend::ConnectProducer(
|
|
const ConnectProducerArgs& args) {
|
|
PERFETTO_DCHECK(args.task_runner->RunsTasksOnCurrentThread());
|
|
|
|
// This should never happen as we can have at most one in-process backend.
|
|
if (service_)
|
|
PERFETTO_FATAL("InProcessTracingBackend initialized twice");
|
|
|
|
return GetOrCreateService(args.task_runner)
|
|
->ConnectProducer(args.producer, /*uid=*/0, args.producer_name,
|
|
args.shmem_size_hint_bytes,
|
|
/*in_process=*/true,
|
|
TracingService::ProducerSMBScrapingMode::kEnabled,
|
|
args.shmem_page_size_hint_bytes);
|
|
}
|
|
|
|
std::unique_ptr<ConsumerEndpoint> InProcessTracingBackend::ConnectConsumer(
|
|
const ConnectConsumerArgs& args) {
|
|
return GetOrCreateService(args.task_runner)
|
|
->ConnectConsumer(args.consumer, /*uid=*/0);
|
|
}
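
// Lazily creates the single in-process TracingService, backed by
// InProcessShmFactory, with shared-memory-buffer scraping enabled.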
TracingService* InProcessTracingBackend::GetOrCreateService(
    base::TaskRunner* task_runner) {
  if (!service_) {
    std::unique_ptr<InProcessShmFactory> shm(new InProcessShmFactory());
    service_ = TracingService::CreateInstance(std::move(shm), task_runner);
    service_->SetSMBScrapingEnabled(true);
  }
  return service_.get();
}

} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: gen/protos/perfetto/ipc/consumer_port.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/ipc/consumer_port.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
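// The classes below are the generated C++ bindings for the request/response
// messages of the consumer IPC port (EnableTracing, ReadBuffers, Flush, etc.).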
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class QueryCapabilitiesResponse;
|
|
class TracingServiceCapabilities;
|
|
class QueryCapabilitiesRequest;
|
|
class QueryServiceStateResponse;
|
|
class TracingServiceState;
|
|
class TracingServiceState_DataSource;
|
|
class DataSourceDescriptor;
|
|
class TracingServiceState_Producer;
|
|
class QueryServiceStateRequest;
|
|
class ObserveEventsResponse;
|
|
class ObservableEvents;
|
|
class ObservableEvents_DataSourceInstanceStateChange;
|
|
class ObserveEventsRequest;
|
|
class GetTraceStatsResponse;
|
|
class TraceStats;
|
|
class TraceStats_BufferStats;
|
|
class GetTraceStatsRequest;
|
|
class AttachResponse;
|
|
class TraceConfig;
|
|
class TraceConfig_IncidentReportConfig;
|
|
class TraceConfig_IncrementalStateConfig;
|
|
class TraceConfig_TriggerConfig;
|
|
class TraceConfig_TriggerConfig_Trigger;
|
|
class TraceConfig_GuardrailOverrides;
|
|
class TraceConfig_StatsdMetadata;
|
|
class TraceConfig_ProducerConfig;
|
|
class TraceConfig_BuiltinDataSource;
|
|
class TraceConfig_DataSource;
|
|
class DataSourceConfig;
|
|
class TestConfig;
|
|
class TestConfig_DummyFields;
|
|
class ChromeConfig;
|
|
class TraceConfig_BufferConfig;
|
|
class AttachRequest;
|
|
class DetachResponse;
|
|
class DetachRequest;
|
|
class FlushResponse;
|
|
class FlushRequest;
|
|
class FreeBuffersResponse;
|
|
class FreeBuffersRequest;
|
|
class ReadBuffersResponse;
|
|
class ReadBuffersResponse_Slice;
|
|
class ReadBuffersRequest;
|
|
class DisableTracingResponse;
|
|
class DisableTracingRequest;
|
|
class ChangeTraceConfigResponse;
|
|
class ChangeTraceConfigRequest;
|
|
class StartTracingResponse;
|
|
class StartTracingRequest;
|
|
class EnableTracingResponse;
|
|
class EnableTracingRequest;
|
|
enum ObservableEvents_Type : int;
|
|
enum ObservableEvents_DataSourceInstanceState : int;
|
|
enum TraceConfig_LockdownModeOperation : int;
|
|
enum TraceConfig_CompressionType : int;
|
|
enum TraceConfig_TriggerConfig_TriggerMode : int;
|
|
enum BuiltinClock : int;
|
|
enum TraceConfig_BufferConfig_FillPolicy : int;
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT QueryCapabilitiesResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kCapabilitiesFieldNumber = 1,
|
|
};
|
|
|
|
QueryCapabilitiesResponse();
|
|
~QueryCapabilitiesResponse() override;
|
|
QueryCapabilitiesResponse(QueryCapabilitiesResponse&&) noexcept;
|
|
QueryCapabilitiesResponse& operator=(QueryCapabilitiesResponse&&);
|
|
QueryCapabilitiesResponse(const QueryCapabilitiesResponse&);
|
|
QueryCapabilitiesResponse& operator=(const QueryCapabilitiesResponse&);
|
|
bool operator==(const QueryCapabilitiesResponse&) const;
|
|
bool operator!=(const QueryCapabilitiesResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_capabilities() const { return _has_field_[1]; }
|
|
const TracingServiceCapabilities& capabilities() const { return *capabilities_; }
|
|
TracingServiceCapabilities* mutable_capabilities() { _has_field_.set(1); return capabilities_.get(); }
|
|
|
|
private:
|
|
::protozero::CopyablePtr<TracingServiceCapabilities> capabilities_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT QueryCapabilitiesRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
QueryCapabilitiesRequest();
|
|
~QueryCapabilitiesRequest() override;
|
|
QueryCapabilitiesRequest(QueryCapabilitiesRequest&&) noexcept;
|
|
QueryCapabilitiesRequest& operator=(QueryCapabilitiesRequest&&);
|
|
QueryCapabilitiesRequest(const QueryCapabilitiesRequest&);
|
|
QueryCapabilitiesRequest& operator=(const QueryCapabilitiesRequest&);
|
|
bool operator==(const QueryCapabilitiesRequest&) const;
|
|
bool operator!=(const QueryCapabilitiesRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT QueryServiceStateResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kServiceStateFieldNumber = 1,
|
|
};
|
|
|
|
QueryServiceStateResponse();
|
|
~QueryServiceStateResponse() override;
|
|
QueryServiceStateResponse(QueryServiceStateResponse&&) noexcept;
|
|
QueryServiceStateResponse& operator=(QueryServiceStateResponse&&);
|
|
QueryServiceStateResponse(const QueryServiceStateResponse&);
|
|
QueryServiceStateResponse& operator=(const QueryServiceStateResponse&);
|
|
bool operator==(const QueryServiceStateResponse&) const;
|
|
bool operator!=(const QueryServiceStateResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_service_state() const { return _has_field_[1]; }
|
|
const TracingServiceState& service_state() const { return *service_state_; }
|
|
TracingServiceState* mutable_service_state() { _has_field_.set(1); return service_state_.get(); }
|
|
|
|
private:
|
|
::protozero::CopyablePtr<TracingServiceState> service_state_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT QueryServiceStateRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
QueryServiceStateRequest();
|
|
~QueryServiceStateRequest() override;
|
|
QueryServiceStateRequest(QueryServiceStateRequest&&) noexcept;
|
|
QueryServiceStateRequest& operator=(QueryServiceStateRequest&&);
|
|
QueryServiceStateRequest(const QueryServiceStateRequest&);
|
|
QueryServiceStateRequest& operator=(const QueryServiceStateRequest&);
|
|
bool operator==(const QueryServiceStateRequest&) const;
|
|
bool operator!=(const QueryServiceStateRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ObserveEventsResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kEventsFieldNumber = 1,
|
|
};
|
|
|
|
ObserveEventsResponse();
|
|
~ObserveEventsResponse() override;
|
|
ObserveEventsResponse(ObserveEventsResponse&&) noexcept;
|
|
ObserveEventsResponse& operator=(ObserveEventsResponse&&);
|
|
ObserveEventsResponse(const ObserveEventsResponse&);
|
|
ObserveEventsResponse& operator=(const ObserveEventsResponse&);
|
|
bool operator==(const ObserveEventsResponse&) const;
|
|
bool operator!=(const ObserveEventsResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_events() const { return _has_field_[1]; }
|
|
const ObservableEvents& events() const { return *events_; }
|
|
ObservableEvents* mutable_events() { _has_field_.set(1); return events_.get(); }
|
|
|
|
private:
|
|
::protozero::CopyablePtr<ObservableEvents> events_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ObserveEventsRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kEventsToObserveFieldNumber = 1,
|
|
};
|
|
|
|
ObserveEventsRequest();
|
|
~ObserveEventsRequest() override;
|
|
ObserveEventsRequest(ObserveEventsRequest&&) noexcept;
|
|
ObserveEventsRequest& operator=(ObserveEventsRequest&&);
|
|
ObserveEventsRequest(const ObserveEventsRequest&);
|
|
ObserveEventsRequest& operator=(const ObserveEventsRequest&);
|
|
bool operator==(const ObserveEventsRequest&) const;
|
|
bool operator!=(const ObserveEventsRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int events_to_observe_size() const { return static_cast<int>(events_to_observe_.size()); }
|
|
const std::vector<ObservableEvents_Type>& events_to_observe() const { return events_to_observe_; }
|
|
std::vector<ObservableEvents_Type>* mutable_events_to_observe() { return &events_to_observe_; }
|
|
void clear_events_to_observe() { events_to_observe_.clear(); }
|
|
void add_events_to_observe(ObservableEvents_Type value) { events_to_observe_.emplace_back(value); }
|
|
ObservableEvents_Type* add_events_to_observe() { events_to_observe_.emplace_back(); return &events_to_observe_.back(); }
|
|
|
|
private:
|
|
std::vector<ObservableEvents_Type> events_to_observe_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT GetTraceStatsResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kTraceStatsFieldNumber = 1,
|
|
};
|
|
|
|
GetTraceStatsResponse();
|
|
~GetTraceStatsResponse() override;
|
|
GetTraceStatsResponse(GetTraceStatsResponse&&) noexcept;
|
|
GetTraceStatsResponse& operator=(GetTraceStatsResponse&&);
|
|
GetTraceStatsResponse(const GetTraceStatsResponse&);
|
|
GetTraceStatsResponse& operator=(const GetTraceStatsResponse&);
|
|
bool operator==(const GetTraceStatsResponse&) const;
|
|
bool operator!=(const GetTraceStatsResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_trace_stats() const { return _has_field_[1]; }
|
|
const TraceStats& trace_stats() const { return *trace_stats_; }
|
|
TraceStats* mutable_trace_stats() { _has_field_.set(1); return trace_stats_.get(); }
|
|
|
|
private:
|
|
::protozero::CopyablePtr<TraceStats> trace_stats_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT GetTraceStatsRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
GetTraceStatsRequest();
|
|
~GetTraceStatsRequest() override;
|
|
GetTraceStatsRequest(GetTraceStatsRequest&&) noexcept;
|
|
GetTraceStatsRequest& operator=(GetTraceStatsRequest&&);
|
|
GetTraceStatsRequest(const GetTraceStatsRequest&);
|
|
GetTraceStatsRequest& operator=(const GetTraceStatsRequest&);
|
|
bool operator==(const GetTraceStatsRequest&) const;
|
|
bool operator!=(const GetTraceStatsRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT AttachResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kTraceConfigFieldNumber = 1,
|
|
};
|
|
|
|
AttachResponse();
|
|
~AttachResponse() override;
|
|
AttachResponse(AttachResponse&&) noexcept;
|
|
AttachResponse& operator=(AttachResponse&&);
|
|
AttachResponse(const AttachResponse&);
|
|
AttachResponse& operator=(const AttachResponse&);
|
|
bool operator==(const AttachResponse&) const;
|
|
bool operator!=(const AttachResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_trace_config() const { return _has_field_[1]; }
|
|
const TraceConfig& trace_config() const { return *trace_config_; }
|
|
TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }
|
|
|
|
private:
|
|
::protozero::CopyablePtr<TraceConfig> trace_config_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT AttachRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kKeyFieldNumber = 1,
|
|
};
|
|
|
|
AttachRequest();
|
|
~AttachRequest() override;
|
|
AttachRequest(AttachRequest&&) noexcept;
|
|
AttachRequest& operator=(AttachRequest&&);
|
|
AttachRequest(const AttachRequest&);
|
|
AttachRequest& operator=(const AttachRequest&);
|
|
bool operator==(const AttachRequest&) const;
|
|
bool operator!=(const AttachRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_key() const { return _has_field_[1]; }
|
|
const std::string& key() const { return key_; }
|
|
void set_key(const std::string& value) { key_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
std::string key_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT DetachResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
DetachResponse();
|
|
~DetachResponse() override;
|
|
DetachResponse(DetachResponse&&) noexcept;
|
|
DetachResponse& operator=(DetachResponse&&);
|
|
DetachResponse(const DetachResponse&);
|
|
DetachResponse& operator=(const DetachResponse&);
|
|
bool operator==(const DetachResponse&) const;
|
|
bool operator!=(const DetachResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT DetachRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kKeyFieldNumber = 1,
|
|
};
|
|
|
|
DetachRequest();
|
|
~DetachRequest() override;
|
|
DetachRequest(DetachRequest&&) noexcept;
|
|
DetachRequest& operator=(DetachRequest&&);
|
|
DetachRequest(const DetachRequest&);
|
|
DetachRequest& operator=(const DetachRequest&);
|
|
bool operator==(const DetachRequest&) const;
|
|
bool operator!=(const DetachRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_key() const { return _has_field_[1]; }
|
|
const std::string& key() const { return key_; }
|
|
void set_key(const std::string& value) { key_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
std::string key_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT FlushResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
FlushResponse();
|
|
~FlushResponse() override;
|
|
FlushResponse(FlushResponse&&) noexcept;
|
|
FlushResponse& operator=(FlushResponse&&);
|
|
FlushResponse(const FlushResponse&);
|
|
FlushResponse& operator=(const FlushResponse&);
|
|
bool operator==(const FlushResponse&) const;
|
|
bool operator!=(const FlushResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT FlushRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kTimeoutMsFieldNumber = 1,
|
|
};
|
|
|
|
FlushRequest();
|
|
~FlushRequest() override;
|
|
FlushRequest(FlushRequest&&) noexcept;
|
|
FlushRequest& operator=(FlushRequest&&);
|
|
FlushRequest(const FlushRequest&);
|
|
FlushRequest& operator=(const FlushRequest&);
|
|
bool operator==(const FlushRequest&) const;
|
|
bool operator!=(const FlushRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_timeout_ms() const { return _has_field_[1]; }
|
|
uint32_t timeout_ms() const { return timeout_ms_; }
|
|
void set_timeout_ms(uint32_t value) { timeout_ms_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
uint32_t timeout_ms_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT FreeBuffersResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
FreeBuffersResponse();
|
|
~FreeBuffersResponse() override;
|
|
FreeBuffersResponse(FreeBuffersResponse&&) noexcept;
|
|
FreeBuffersResponse& operator=(FreeBuffersResponse&&);
|
|
FreeBuffersResponse(const FreeBuffersResponse&);
|
|
FreeBuffersResponse& operator=(const FreeBuffersResponse&);
|
|
bool operator==(const FreeBuffersResponse&) const;
|
|
bool operator!=(const FreeBuffersResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT FreeBuffersRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kBufferIdsFieldNumber = 1,
|
|
};
|
|
|
|
FreeBuffersRequest();
|
|
~FreeBuffersRequest() override;
|
|
FreeBuffersRequest(FreeBuffersRequest&&) noexcept;
|
|
FreeBuffersRequest& operator=(FreeBuffersRequest&&);
|
|
FreeBuffersRequest(const FreeBuffersRequest&);
|
|
FreeBuffersRequest& operator=(const FreeBuffersRequest&);
|
|
bool operator==(const FreeBuffersRequest&) const;
|
|
bool operator!=(const FreeBuffersRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int buffer_ids_size() const { return static_cast<int>(buffer_ids_.size()); }
|
|
const std::vector<uint32_t>& buffer_ids() const { return buffer_ids_; }
|
|
std::vector<uint32_t>* mutable_buffer_ids() { return &buffer_ids_; }
|
|
void clear_buffer_ids() { buffer_ids_.clear(); }
|
|
void add_buffer_ids(uint32_t value) { buffer_ids_.emplace_back(value); }
|
|
uint32_t* add_buffer_ids() { buffer_ids_.emplace_back(); return &buffer_ids_.back(); }
|
|
|
|
private:
|
|
std::vector<uint32_t> buffer_ids_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ReadBuffersResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
using Slice = ReadBuffersResponse_Slice;
|
|
enum FieldNumbers {
|
|
kSlicesFieldNumber = 2,
|
|
};
|
|
|
|
ReadBuffersResponse();
|
|
~ReadBuffersResponse() override;
|
|
ReadBuffersResponse(ReadBuffersResponse&&) noexcept;
|
|
ReadBuffersResponse& operator=(ReadBuffersResponse&&);
|
|
ReadBuffersResponse(const ReadBuffersResponse&);
|
|
ReadBuffersResponse& operator=(const ReadBuffersResponse&);
|
|
bool operator==(const ReadBuffersResponse&) const;
|
|
bool operator!=(const ReadBuffersResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int slices_size() const { return static_cast<int>(slices_.size()); }
|
|
const std::vector<ReadBuffersResponse_Slice>& slices() const { return slices_; }
|
|
std::vector<ReadBuffersResponse_Slice>* mutable_slices() { return &slices_; }
|
|
void clear_slices() { slices_.clear(); }
|
|
ReadBuffersResponse_Slice* add_slices() { slices_.emplace_back(); return &slices_.back(); }
|
|
|
|
private:
|
|
std::vector<ReadBuffersResponse_Slice> slices_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ReadBuffersResponse_Slice : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kDataFieldNumber = 1,
|
|
kLastSliceForPacketFieldNumber = 2,
|
|
};
|
|
|
|
ReadBuffersResponse_Slice();
|
|
~ReadBuffersResponse_Slice() override;
|
|
ReadBuffersResponse_Slice(ReadBuffersResponse_Slice&&) noexcept;
|
|
ReadBuffersResponse_Slice& operator=(ReadBuffersResponse_Slice&&);
|
|
ReadBuffersResponse_Slice(const ReadBuffersResponse_Slice&);
|
|
ReadBuffersResponse_Slice& operator=(const ReadBuffersResponse_Slice&);
|
|
bool operator==(const ReadBuffersResponse_Slice&) const;
|
|
bool operator!=(const ReadBuffersResponse_Slice& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_data() const { return _has_field_[1]; }
|
|
const std::string& data() const { return data_; }
|
|
void set_data(const std::string& value) { data_ = value; _has_field_.set(1); }
|
|
void set_data(const void* p, size_t s) { data_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(1); }
|
|
|
|
bool has_last_slice_for_packet() const { return _has_field_[2]; }
|
|
bool last_slice_for_packet() const { return last_slice_for_packet_; }
|
|
void set_last_slice_for_packet(bool value) { last_slice_for_packet_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
std::string data_{};
|
|
bool last_slice_for_packet_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ReadBuffersRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
ReadBuffersRequest();
|
|
~ReadBuffersRequest() override;
|
|
ReadBuffersRequest(ReadBuffersRequest&&) noexcept;
|
|
ReadBuffersRequest& operator=(ReadBuffersRequest&&);
|
|
ReadBuffersRequest(const ReadBuffersRequest&);
|
|
ReadBuffersRequest& operator=(const ReadBuffersRequest&);
|
|
bool operator==(const ReadBuffersRequest&) const;
|
|
bool operator!=(const ReadBuffersRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT DisableTracingResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
DisableTracingResponse();
|
|
~DisableTracingResponse() override;
|
|
DisableTracingResponse(DisableTracingResponse&&) noexcept;
|
|
DisableTracingResponse& operator=(DisableTracingResponse&&);
|
|
DisableTracingResponse(const DisableTracingResponse&);
|
|
DisableTracingResponse& operator=(const DisableTracingResponse&);
|
|
bool operator==(const DisableTracingResponse&) const;
|
|
bool operator!=(const DisableTracingResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT DisableTracingRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
DisableTracingRequest();
|
|
~DisableTracingRequest() override;
|
|
DisableTracingRequest(DisableTracingRequest&&) noexcept;
|
|
DisableTracingRequest& operator=(DisableTracingRequest&&);
|
|
DisableTracingRequest(const DisableTracingRequest&);
|
|
DisableTracingRequest& operator=(const DisableTracingRequest&);
|
|
bool operator==(const DisableTracingRequest&) const;
|
|
bool operator!=(const DisableTracingRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ChangeTraceConfigResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
ChangeTraceConfigResponse();
|
|
~ChangeTraceConfigResponse() override;
|
|
ChangeTraceConfigResponse(ChangeTraceConfigResponse&&) noexcept;
|
|
ChangeTraceConfigResponse& operator=(ChangeTraceConfigResponse&&);
|
|
ChangeTraceConfigResponse(const ChangeTraceConfigResponse&);
|
|
ChangeTraceConfigResponse& operator=(const ChangeTraceConfigResponse&);
|
|
bool operator==(const ChangeTraceConfigResponse&) const;
|
|
bool operator!=(const ChangeTraceConfigResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};

class PERFETTO_EXPORT ChangeTraceConfigRequest : public ::protozero::CppMessageObj {
 public:
  enum FieldNumbers {
    kTraceConfigFieldNumber = 1,
  };

  ChangeTraceConfigRequest();
  ~ChangeTraceConfigRequest() override;
  ChangeTraceConfigRequest(ChangeTraceConfigRequest&&) noexcept;
  ChangeTraceConfigRequest& operator=(ChangeTraceConfigRequest&&);
  ChangeTraceConfigRequest(const ChangeTraceConfigRequest&);
  ChangeTraceConfigRequest& operator=(const ChangeTraceConfigRequest&);
  bool operator==(const ChangeTraceConfigRequest&) const;
  bool operator!=(const ChangeTraceConfigRequest& other) const { return !(*this == other); }

  bool ParseFromArray(const void*, size_t) override;
  std::string SerializeAsString() const override;
  std::vector<uint8_t> SerializeAsArray() const override;
  void Serialize(::protozero::Message*) const;

  bool has_trace_config() const { return _has_field_[1]; }
  const TraceConfig& trace_config() const { return *trace_config_; }
  TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }

 private:
  ::protozero::CopyablePtr<TraceConfig> trace_config_;

  // Allows to preserve unknown protobuf fields for compatibility
  // with future versions of .proto files.
  std::string unknown_fields_;

  std::bitset<2> _has_field_{};
};
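
// Illustrative sketch (not generated code): the has_/accessor/mutable_ trio
// above is the usual way an optional submessage field is populated before
// serializing. Assuming TraceConfig exposes a set_duration_ms() setter (it is
// declared elsewhere in this amalgamation, not in this block), a caller might
// write:
//
//   ChangeTraceConfigRequest req;
//   req.mutable_trace_config()->set_duration_ms(10000);  // marks field 1 as set
//   std::string wire_bytes = req.SerializeAsString();
//
// mutable_trace_config() flips bit 1 in _has_field_, which is what makes
// Serialize() emit the nested trace_config message.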

class PERFETTO_EXPORT StartTracingResponse : public ::protozero::CppMessageObj {
 public:
  enum FieldNumbers {
  };

  StartTracingResponse();
  ~StartTracingResponse() override;
  StartTracingResponse(StartTracingResponse&&) noexcept;
  StartTracingResponse& operator=(StartTracingResponse&&);
  StartTracingResponse(const StartTracingResponse&);
  StartTracingResponse& operator=(const StartTracingResponse&);
  bool operator==(const StartTracingResponse&) const;
  bool operator!=(const StartTracingResponse& other) const { return !(*this == other); }

  bool ParseFromArray(const void*, size_t) override;
  std::string SerializeAsString() const override;
  std::vector<uint8_t> SerializeAsArray() const override;
  void Serialize(::protozero::Message*) const;

 private:
  // Allows to preserve unknown protobuf fields for compatibility
  // with future versions of .proto files.
  std::string unknown_fields_;

  std::bitset<2> _has_field_{};
};

class PERFETTO_EXPORT StartTracingRequest : public ::protozero::CppMessageObj {
 public:
  enum FieldNumbers {
  };

  StartTracingRequest();
  ~StartTracingRequest() override;
  StartTracingRequest(StartTracingRequest&&) noexcept;
  StartTracingRequest& operator=(StartTracingRequest&&);
  StartTracingRequest(const StartTracingRequest&);
  StartTracingRequest& operator=(const StartTracingRequest&);
  bool operator==(const StartTracingRequest&) const;
  bool operator!=(const StartTracingRequest& other) const { return !(*this == other); }

  bool ParseFromArray(const void*, size_t) override;
  std::string SerializeAsString() const override;
  std::vector<uint8_t> SerializeAsArray() const override;
  void Serialize(::protozero::Message*) const;

 private:
  // Allows to preserve unknown protobuf fields for compatibility
  // with future versions of .proto files.
  std::string unknown_fields_;

  std::bitset<2> _has_field_{};
};

class PERFETTO_EXPORT EnableTracingResponse : public ::protozero::CppMessageObj {
 public:
  enum FieldNumbers {
    kDisabledFieldNumber = 1,
  };

  EnableTracingResponse();
  ~EnableTracingResponse() override;
  EnableTracingResponse(EnableTracingResponse&&) noexcept;
  EnableTracingResponse& operator=(EnableTracingResponse&&);
  EnableTracingResponse(const EnableTracingResponse&);
  EnableTracingResponse& operator=(const EnableTracingResponse&);
  bool operator==(const EnableTracingResponse&) const;
  bool operator!=(const EnableTracingResponse& other) const { return !(*this == other); }

  bool ParseFromArray(const void*, size_t) override;
  std::string SerializeAsString() const override;
  std::vector<uint8_t> SerializeAsArray() const override;
  void Serialize(::protozero::Message*) const;

  bool has_disabled() const { return _has_field_[1]; }
  bool disabled() const { return disabled_; }
  void set_disabled(bool value) { disabled_ = value; _has_field_.set(1); }

 private:
  bool disabled_{};

  // Allows to preserve unknown protobuf fields for compatibility
  // with future versions of .proto files.
  std::string unknown_fields_;

  std::bitset<2> _has_field_{};
};

class PERFETTO_EXPORT EnableTracingRequest : public ::protozero::CppMessageObj {
 public:
  enum FieldNumbers {
    kTraceConfigFieldNumber = 1,
    kAttachNotificationOnlyFieldNumber = 2,
  };

  EnableTracingRequest();
  ~EnableTracingRequest() override;
  EnableTracingRequest(EnableTracingRequest&&) noexcept;
  EnableTracingRequest& operator=(EnableTracingRequest&&);
  EnableTracingRequest(const EnableTracingRequest&);
  EnableTracingRequest& operator=(const EnableTracingRequest&);
  bool operator==(const EnableTracingRequest&) const;
  bool operator!=(const EnableTracingRequest& other) const { return !(*this == other); }

  bool ParseFromArray(const void*, size_t) override;
  std::string SerializeAsString() const override;
  std::vector<uint8_t> SerializeAsArray() const override;
  void Serialize(::protozero::Message*) const;

  bool has_trace_config() const { return _has_field_[1]; }
  const TraceConfig& trace_config() const { return *trace_config_; }
  TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }

  bool has_attach_notification_only() const { return _has_field_[2]; }
  bool attach_notification_only() const { return attach_notification_only_; }
  void set_attach_notification_only(bool value) { attach_notification_only_ = value; _has_field_.set(2); }

 private:
  ::protozero::CopyablePtr<TraceConfig> trace_config_;
  bool attach_notification_only_{};

  // Allows to preserve unknown protobuf fields for compatibility
  // with future versions of .proto files.
  std::string unknown_fields_;

  std::bitset<3> _has_field_{};
};
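
// Illustrative sketch (not generated code): EnableTracingRequest carries the
// TraceConfig for a new session plus the attach_notification_only flag. Using
// only the accessors declared above, a consumer-side caller could build one
// as:
//
//   EnableTracingRequest req;
//   req.mutable_trace_config();               // sets _has_field_[1]
//   req.set_attach_notification_only(false);  // sets _has_field_[2]
//   std::vector<uint8_t> bytes = req.SerializeAsArray();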

} // namespace perfetto
} // namespace protos
} // namespace gen

#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_CPP_H_
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"

namespace perfetto {
namespace protos {
namespace gen {

QueryCapabilitiesResponse::QueryCapabilitiesResponse() = default;
QueryCapabilitiesResponse::~QueryCapabilitiesResponse() = default;
QueryCapabilitiesResponse::QueryCapabilitiesResponse(const QueryCapabilitiesResponse&) = default;
QueryCapabilitiesResponse& QueryCapabilitiesResponse::operator=(const QueryCapabilitiesResponse&) = default;
QueryCapabilitiesResponse::QueryCapabilitiesResponse(QueryCapabilitiesResponse&&) noexcept = default;
QueryCapabilitiesResponse& QueryCapabilitiesResponse::operator=(QueryCapabilitiesResponse&&) = default;

bool QueryCapabilitiesResponse::operator==(const QueryCapabilitiesResponse& other) const {
  return unknown_fields_ == other.unknown_fields_
   && capabilities_ == other.capabilities_;
}

bool QueryCapabilitiesResponse::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* capabilities */:
        (*capabilities_).ParseFromString(field.as_std_string());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string QueryCapabilitiesResponse::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> QueryCapabilitiesResponse::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void QueryCapabilitiesResponse::Serialize(::protozero::Message* msg) const {
  // Field 1: capabilities
  if (_has_field_[1]) {
    (*capabilities_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}
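
// Illustrative sketch (not generated code): ParseFromArray() and Serialize()
// above form a lossless round trip. Field ids handled by the switch are
// decoded into members; every other field is re-appended verbatim from
// unknown_fields_, so re-serializing a message parsed from a newer .proto
// keeps fields this version does not know about. Given some byte buffer
// `buf` (hypothetical), usage looks like:
//
//   QueryCapabilitiesResponse resp;
//   if (resp.ParseFromArray(buf.data(), buf.size())) {
//     std::string echoed = resp.SerializeAsString();  // unknown fields preserved
//   }
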
QueryCapabilitiesRequest::QueryCapabilitiesRequest() = default;
|
|
QueryCapabilitiesRequest::~QueryCapabilitiesRequest() = default;
|
|
QueryCapabilitiesRequest::QueryCapabilitiesRequest(const QueryCapabilitiesRequest&) = default;
|
|
QueryCapabilitiesRequest& QueryCapabilitiesRequest::operator=(const QueryCapabilitiesRequest&) = default;
|
|
QueryCapabilitiesRequest::QueryCapabilitiesRequest(QueryCapabilitiesRequest&&) noexcept = default;
|
|
QueryCapabilitiesRequest& QueryCapabilitiesRequest::operator=(QueryCapabilitiesRequest&&) = default;
|
|
|
|
bool QueryCapabilitiesRequest::operator==(const QueryCapabilitiesRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool QueryCapabilitiesRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string QueryCapabilitiesRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> QueryCapabilitiesRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void QueryCapabilitiesRequest::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
QueryServiceStateResponse::QueryServiceStateResponse() = default;
|
|
QueryServiceStateResponse::~QueryServiceStateResponse() = default;
|
|
QueryServiceStateResponse::QueryServiceStateResponse(const QueryServiceStateResponse&) = default;
|
|
QueryServiceStateResponse& QueryServiceStateResponse::operator=(const QueryServiceStateResponse&) = default;
|
|
QueryServiceStateResponse::QueryServiceStateResponse(QueryServiceStateResponse&&) noexcept = default;
|
|
QueryServiceStateResponse& QueryServiceStateResponse::operator=(QueryServiceStateResponse&&) = default;
|
|
|
|
bool QueryServiceStateResponse::operator==(const QueryServiceStateResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& service_state_ == other.service_state_;
|
|
}
|
|
|
|
bool QueryServiceStateResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* service_state */:
|
|
(*service_state_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string QueryServiceStateResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> QueryServiceStateResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void QueryServiceStateResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: service_state
|
|
if (_has_field_[1]) {
|
|
(*service_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
QueryServiceStateRequest::QueryServiceStateRequest() = default;
|
|
QueryServiceStateRequest::~QueryServiceStateRequest() = default;
|
|
QueryServiceStateRequest::QueryServiceStateRequest(const QueryServiceStateRequest&) = default;
|
|
QueryServiceStateRequest& QueryServiceStateRequest::operator=(const QueryServiceStateRequest&) = default;
|
|
QueryServiceStateRequest::QueryServiceStateRequest(QueryServiceStateRequest&&) noexcept = default;
|
|
QueryServiceStateRequest& QueryServiceStateRequest::operator=(QueryServiceStateRequest&&) = default;
|
|
|
|
bool QueryServiceStateRequest::operator==(const QueryServiceStateRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool QueryServiceStateRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string QueryServiceStateRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> QueryServiceStateRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void QueryServiceStateRequest::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ObserveEventsResponse::ObserveEventsResponse() = default;
|
|
ObserveEventsResponse::~ObserveEventsResponse() = default;
|
|
ObserveEventsResponse::ObserveEventsResponse(const ObserveEventsResponse&) = default;
|
|
ObserveEventsResponse& ObserveEventsResponse::operator=(const ObserveEventsResponse&) = default;
|
|
ObserveEventsResponse::ObserveEventsResponse(ObserveEventsResponse&&) noexcept = default;
|
|
ObserveEventsResponse& ObserveEventsResponse::operator=(ObserveEventsResponse&&) = default;
|
|
|
|
bool ObserveEventsResponse::operator==(const ObserveEventsResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& events_ == other.events_;
|
|
}
|
|
|
|
bool ObserveEventsResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* events */:
|
|
(*events_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ObserveEventsResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ObserveEventsResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ObserveEventsResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: events
|
|
if (_has_field_[1]) {
|
|
(*events_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ObserveEventsRequest::ObserveEventsRequest() = default;
|
|
ObserveEventsRequest::~ObserveEventsRequest() = default;
|
|
ObserveEventsRequest::ObserveEventsRequest(const ObserveEventsRequest&) = default;
|
|
ObserveEventsRequest& ObserveEventsRequest::operator=(const ObserveEventsRequest&) = default;
|
|
ObserveEventsRequest::ObserveEventsRequest(ObserveEventsRequest&&) noexcept = default;
|
|
ObserveEventsRequest& ObserveEventsRequest::operator=(ObserveEventsRequest&&) = default;
|
|
|
|
bool ObserveEventsRequest::operator==(const ObserveEventsRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& events_to_observe_ == other.events_to_observe_;
|
|
}
|
|
|
|
bool ObserveEventsRequest::ParseFromArray(const void* raw, size_t size) {
|
|
events_to_observe_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* events_to_observe */:
|
|
events_to_observe_.emplace_back();
|
|
field.get(&events_to_observe_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ObserveEventsRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ObserveEventsRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ObserveEventsRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: events_to_observe
|
|
for (auto& it : events_to_observe_) {
|
|
msg->AppendVarInt(1, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
GetTraceStatsResponse::GetTraceStatsResponse() = default;
|
|
GetTraceStatsResponse::~GetTraceStatsResponse() = default;
|
|
GetTraceStatsResponse::GetTraceStatsResponse(const GetTraceStatsResponse&) = default;
|
|
GetTraceStatsResponse& GetTraceStatsResponse::operator=(const GetTraceStatsResponse&) = default;
|
|
GetTraceStatsResponse::GetTraceStatsResponse(GetTraceStatsResponse&&) noexcept = default;
|
|
GetTraceStatsResponse& GetTraceStatsResponse::operator=(GetTraceStatsResponse&&) = default;
|
|
|
|
bool GetTraceStatsResponse::operator==(const GetTraceStatsResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& trace_stats_ == other.trace_stats_;
|
|
}
|
|
|
|
bool GetTraceStatsResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_stats */:
|
|
(*trace_stats_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetTraceStatsResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetTraceStatsResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetTraceStatsResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_stats
|
|
if (_has_field_[1]) {
|
|
(*trace_stats_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
GetTraceStatsRequest::GetTraceStatsRequest() = default;
|
|
GetTraceStatsRequest::~GetTraceStatsRequest() = default;
|
|
GetTraceStatsRequest::GetTraceStatsRequest(const GetTraceStatsRequest&) = default;
|
|
GetTraceStatsRequest& GetTraceStatsRequest::operator=(const GetTraceStatsRequest&) = default;
|
|
GetTraceStatsRequest::GetTraceStatsRequest(GetTraceStatsRequest&&) noexcept = default;
|
|
GetTraceStatsRequest& GetTraceStatsRequest::operator=(GetTraceStatsRequest&&) = default;
|
|
|
|
bool GetTraceStatsRequest::operator==(const GetTraceStatsRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool GetTraceStatsRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetTraceStatsRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetTraceStatsRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetTraceStatsRequest::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
AttachResponse::AttachResponse() = default;
|
|
AttachResponse::~AttachResponse() = default;
|
|
AttachResponse::AttachResponse(const AttachResponse&) = default;
|
|
AttachResponse& AttachResponse::operator=(const AttachResponse&) = default;
|
|
AttachResponse::AttachResponse(AttachResponse&&) noexcept = default;
|
|
AttachResponse& AttachResponse::operator=(AttachResponse&&) = default;
|
|
|
|
bool AttachResponse::operator==(const AttachResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& trace_config_ == other.trace_config_;
|
|
}
|
|
|
|
bool AttachResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_config */:
|
|
(*trace_config_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AttachResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AttachResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AttachResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_config
|
|
if (_has_field_[1]) {
|
|
(*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
AttachRequest::AttachRequest() = default;
|
|
AttachRequest::~AttachRequest() = default;
|
|
AttachRequest::AttachRequest(const AttachRequest&) = default;
|
|
AttachRequest& AttachRequest::operator=(const AttachRequest&) = default;
|
|
AttachRequest::AttachRequest(AttachRequest&&) noexcept = default;
|
|
AttachRequest& AttachRequest::operator=(AttachRequest&&) = default;
|
|
|
|
bool AttachRequest::operator==(const AttachRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& key_ == other.key_;
|
|
}
|
|
|
|
bool AttachRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* key */:
|
|
field.get(&key_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AttachRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AttachRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AttachRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: key
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, key_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
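
// Note (not generated code, intended use only): the `key` string serialized
// above appears to be the session name previously passed in a DetachRequest;
// issuing Attach with the same key is meant to re-associate a consumer with
// that detached tracing session. This describes how the field is used, not
// additional generated API.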
|
|
|
|
|
|
DetachResponse::DetachResponse() = default;
|
|
DetachResponse::~DetachResponse() = default;
|
|
DetachResponse::DetachResponse(const DetachResponse&) = default;
|
|
DetachResponse& DetachResponse::operator=(const DetachResponse&) = default;
|
|
DetachResponse::DetachResponse(DetachResponse&&) noexcept = default;
|
|
DetachResponse& DetachResponse::operator=(DetachResponse&&) = default;
|
|
|
|
bool DetachResponse::operator==(const DetachResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool DetachResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DetachResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DetachResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DetachResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
DetachRequest::DetachRequest() = default;
|
|
DetachRequest::~DetachRequest() = default;
|
|
DetachRequest::DetachRequest(const DetachRequest&) = default;
|
|
DetachRequest& DetachRequest::operator=(const DetachRequest&) = default;
|
|
DetachRequest::DetachRequest(DetachRequest&&) noexcept = default;
|
|
DetachRequest& DetachRequest::operator=(DetachRequest&&) = default;
|
|
|
|
bool DetachRequest::operator==(const DetachRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& key_ == other.key_;
|
|
}
|
|
|
|
bool DetachRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* key */:
|
|
field.get(&key_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DetachRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DetachRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DetachRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: key
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, key_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
FlushResponse::FlushResponse() = default;
|
|
FlushResponse::~FlushResponse() = default;
|
|
FlushResponse::FlushResponse(const FlushResponse&) = default;
|
|
FlushResponse& FlushResponse::operator=(const FlushResponse&) = default;
|
|
FlushResponse::FlushResponse(FlushResponse&&) noexcept = default;
|
|
FlushResponse& FlushResponse::operator=(FlushResponse&&) = default;
|
|
|
|
bool FlushResponse::operator==(const FlushResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool FlushResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FlushResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FlushResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FlushResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
FlushRequest::FlushRequest() = default;
|
|
FlushRequest::~FlushRequest() = default;
|
|
FlushRequest::FlushRequest(const FlushRequest&) = default;
|
|
FlushRequest& FlushRequest::operator=(const FlushRequest&) = default;
|
|
FlushRequest::FlushRequest(FlushRequest&&) noexcept = default;
|
|
FlushRequest& FlushRequest::operator=(FlushRequest&&) = default;
|
|
|
|
bool FlushRequest::operator==(const FlushRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& timeout_ms_ == other.timeout_ms_;
|
|
}
|
|
|
|
bool FlushRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* timeout_ms */:
|
|
field.get(&timeout_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FlushRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FlushRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FlushRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: timeout_ms
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, timeout_ms_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
FreeBuffersResponse::FreeBuffersResponse() = default;
|
|
FreeBuffersResponse::~FreeBuffersResponse() = default;
|
|
FreeBuffersResponse::FreeBuffersResponse(const FreeBuffersResponse&) = default;
|
|
FreeBuffersResponse& FreeBuffersResponse::operator=(const FreeBuffersResponse&) = default;
|
|
FreeBuffersResponse::FreeBuffersResponse(FreeBuffersResponse&&) noexcept = default;
|
|
FreeBuffersResponse& FreeBuffersResponse::operator=(FreeBuffersResponse&&) = default;
|
|
|
|
bool FreeBuffersResponse::operator==(const FreeBuffersResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool FreeBuffersResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FreeBuffersResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FreeBuffersResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FreeBuffersResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
FreeBuffersRequest::FreeBuffersRequest() = default;
|
|
FreeBuffersRequest::~FreeBuffersRequest() = default;
|
|
FreeBuffersRequest::FreeBuffersRequest(const FreeBuffersRequest&) = default;
|
|
FreeBuffersRequest& FreeBuffersRequest::operator=(const FreeBuffersRequest&) = default;
|
|
FreeBuffersRequest::FreeBuffersRequest(FreeBuffersRequest&&) noexcept = default;
|
|
FreeBuffersRequest& FreeBuffersRequest::operator=(FreeBuffersRequest&&) = default;
|
|
|
|
bool FreeBuffersRequest::operator==(const FreeBuffersRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& buffer_ids_ == other.buffer_ids_;
|
|
}
|
|
|
|
bool FreeBuffersRequest::ParseFromArray(const void* raw, size_t size) {
|
|
buffer_ids_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* buffer_ids */:
|
|
buffer_ids_.emplace_back();
|
|
field.get(&buffer_ids_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FreeBuffersRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FreeBuffersRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FreeBuffersRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: buffer_ids
|
|
for (auto& it : buffer_ids_) {
|
|
msg->AppendVarInt(1, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ReadBuffersResponse::ReadBuffersResponse() = default;
|
|
ReadBuffersResponse::~ReadBuffersResponse() = default;
|
|
ReadBuffersResponse::ReadBuffersResponse(const ReadBuffersResponse&) = default;
|
|
ReadBuffersResponse& ReadBuffersResponse::operator=(const ReadBuffersResponse&) = default;
|
|
ReadBuffersResponse::ReadBuffersResponse(ReadBuffersResponse&&) noexcept = default;
|
|
ReadBuffersResponse& ReadBuffersResponse::operator=(ReadBuffersResponse&&) = default;
|
|
|
|
bool ReadBuffersResponse::operator==(const ReadBuffersResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& slices_ == other.slices_;
|
|
}
|
|
|
|
bool ReadBuffersResponse::ParseFromArray(const void* raw, size_t size) {
|
|
slices_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 2 /* slices */:
|
|
slices_.emplace_back();
|
|
slices_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ReadBuffersResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ReadBuffersResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ReadBuffersResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 2: slices
|
|
for (auto& it : slices_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ReadBuffersResponse_Slice::ReadBuffersResponse_Slice() = default;
|
|
ReadBuffersResponse_Slice::~ReadBuffersResponse_Slice() = default;
|
|
ReadBuffersResponse_Slice::ReadBuffersResponse_Slice(const ReadBuffersResponse_Slice&) = default;
|
|
ReadBuffersResponse_Slice& ReadBuffersResponse_Slice::operator=(const ReadBuffersResponse_Slice&) = default;
|
|
ReadBuffersResponse_Slice::ReadBuffersResponse_Slice(ReadBuffersResponse_Slice&&) noexcept = default;
|
|
ReadBuffersResponse_Slice& ReadBuffersResponse_Slice::operator=(ReadBuffersResponse_Slice&&) = default;
|
|
|
|
bool ReadBuffersResponse_Slice::operator==(const ReadBuffersResponse_Slice& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& data_ == other.data_
|
|
&& last_slice_for_packet_ == other.last_slice_for_packet_;
|
|
}
|
|
|
|
bool ReadBuffersResponse_Slice::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data */:
|
|
field.get(&data_);
|
|
break;
|
|
case 2 /* last_slice_for_packet */:
|
|
field.get(&last_slice_for_packet_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ReadBuffersResponse_Slice::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ReadBuffersResponse_Slice::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ReadBuffersResponse_Slice::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, data_);
|
|
}
|
|
|
|
// Field 2: last_slice_for_packet
|
|
if (_has_field_[2]) {
|
|
msg->AppendTinyVarInt(2, last_slice_for_packet_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
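
// Note (not generated code, intended use only): each ReadBuffersResponse_Slice
// carries a chunk of trace data in `data`, and `last_slice_for_packet` marks
// the slice that completes a packet. A reader typically concatenates
// consecutive slices until it sees last_slice_for_packet == true, then hands
// the joined bytes to the packet decoder. This describes how the fields above
// are consumed, not additional generated API.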
|
|
|
|
|
|
ReadBuffersRequest::ReadBuffersRequest() = default;
|
|
ReadBuffersRequest::~ReadBuffersRequest() = default;
|
|
ReadBuffersRequest::ReadBuffersRequest(const ReadBuffersRequest&) = default;
|
|
ReadBuffersRequest& ReadBuffersRequest::operator=(const ReadBuffersRequest&) = default;
|
|
ReadBuffersRequest::ReadBuffersRequest(ReadBuffersRequest&&) noexcept = default;
|
|
ReadBuffersRequest& ReadBuffersRequest::operator=(ReadBuffersRequest&&) = default;
|
|
|
|
bool ReadBuffersRequest::operator==(const ReadBuffersRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool ReadBuffersRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ReadBuffersRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ReadBuffersRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ReadBuffersRequest::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
DisableTracingResponse::DisableTracingResponse() = default;
|
|
DisableTracingResponse::~DisableTracingResponse() = default;
|
|
DisableTracingResponse::DisableTracingResponse(const DisableTracingResponse&) = default;
|
|
DisableTracingResponse& DisableTracingResponse::operator=(const DisableTracingResponse&) = default;
|
|
DisableTracingResponse::DisableTracingResponse(DisableTracingResponse&&) noexcept = default;
|
|
DisableTracingResponse& DisableTracingResponse::operator=(DisableTracingResponse&&) = default;
|
|
|
|
bool DisableTracingResponse::operator==(const DisableTracingResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool DisableTracingResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DisableTracingResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DisableTracingResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DisableTracingResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
DisableTracingRequest::DisableTracingRequest() = default;
|
|
DisableTracingRequest::~DisableTracingRequest() = default;
|
|
DisableTracingRequest::DisableTracingRequest(const DisableTracingRequest&) = default;
|
|
DisableTracingRequest& DisableTracingRequest::operator=(const DisableTracingRequest&) = default;
|
|
DisableTracingRequest::DisableTracingRequest(DisableTracingRequest&&) noexcept = default;
|
|
DisableTracingRequest& DisableTracingRequest::operator=(DisableTracingRequest&&) = default;
|
|
|
|
bool DisableTracingRequest::operator==(const DisableTracingRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool DisableTracingRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DisableTracingRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DisableTracingRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DisableTracingRequest::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ChangeTraceConfigResponse::ChangeTraceConfigResponse() = default;
|
|
ChangeTraceConfigResponse::~ChangeTraceConfigResponse() = default;
|
|
ChangeTraceConfigResponse::ChangeTraceConfigResponse(const ChangeTraceConfigResponse&) = default;
|
|
ChangeTraceConfigResponse& ChangeTraceConfigResponse::operator=(const ChangeTraceConfigResponse&) = default;
|
|
ChangeTraceConfigResponse::ChangeTraceConfigResponse(ChangeTraceConfigResponse&&) noexcept = default;
|
|
ChangeTraceConfigResponse& ChangeTraceConfigResponse::operator=(ChangeTraceConfigResponse&&) = default;
|
|
|
|
bool ChangeTraceConfigResponse::operator==(const ChangeTraceConfigResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool ChangeTraceConfigResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChangeTraceConfigResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChangeTraceConfigResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChangeTraceConfigResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ChangeTraceConfigRequest::ChangeTraceConfigRequest() = default;
|
|
ChangeTraceConfigRequest::~ChangeTraceConfigRequest() = default;
|
|
ChangeTraceConfigRequest::ChangeTraceConfigRequest(const ChangeTraceConfigRequest&) = default;
|
|
ChangeTraceConfigRequest& ChangeTraceConfigRequest::operator=(const ChangeTraceConfigRequest&) = default;
|
|
ChangeTraceConfigRequest::ChangeTraceConfigRequest(ChangeTraceConfigRequest&&) noexcept = default;
|
|
ChangeTraceConfigRequest& ChangeTraceConfigRequest::operator=(ChangeTraceConfigRequest&&) = default;
|
|
|
|
bool ChangeTraceConfigRequest::operator==(const ChangeTraceConfigRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& trace_config_ == other.trace_config_;
|
|
}
|
|
|
|
bool ChangeTraceConfigRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_config */:
|
|
(*trace_config_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChangeTraceConfigRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChangeTraceConfigRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChangeTraceConfigRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_config
|
|
if (_has_field_[1]) {
|
|
(*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
StartTracingResponse::StartTracingResponse() = default;
|
|
StartTracingResponse::~StartTracingResponse() = default;
|
|
StartTracingResponse::StartTracingResponse(const StartTracingResponse&) = default;
|
|
StartTracingResponse& StartTracingResponse::operator=(const StartTracingResponse&) = default;
|
|
StartTracingResponse::StartTracingResponse(StartTracingResponse&&) noexcept = default;
|
|
StartTracingResponse& StartTracingResponse::operator=(StartTracingResponse&&) = default;
|
|
|
|
bool StartTracingResponse::operator==(const StartTracingResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool StartTracingResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string StartTracingResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> StartTracingResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void StartTracingResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
StartTracingRequest::StartTracingRequest() = default;
|
|
StartTracingRequest::~StartTracingRequest() = default;
|
|
StartTracingRequest::StartTracingRequest(const StartTracingRequest&) = default;
|
|
StartTracingRequest& StartTracingRequest::operator=(const StartTracingRequest&) = default;
|
|
StartTracingRequest::StartTracingRequest(StartTracingRequest&&) noexcept = default;
|
|
StartTracingRequest& StartTracingRequest::operator=(StartTracingRequest&&) = default;
|
|
|
|
bool StartTracingRequest::operator==(const StartTracingRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool StartTracingRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string StartTracingRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> StartTracingRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void StartTracingRequest::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
EnableTracingResponse::EnableTracingResponse() = default;
EnableTracingResponse::~EnableTracingResponse() = default;
EnableTracingResponse::EnableTracingResponse(const EnableTracingResponse&) = default;
EnableTracingResponse& EnableTracingResponse::operator=(const EnableTracingResponse&) = default;
EnableTracingResponse::EnableTracingResponse(EnableTracingResponse&&) noexcept = default;
EnableTracingResponse& EnableTracingResponse::operator=(EnableTracingResponse&&) = default;

bool EnableTracingResponse::operator==(const EnableTracingResponse& other) const {
  return unknown_fields_ == other.unknown_fields_
   && disabled_ == other.disabled_;
}

bool EnableTracingResponse::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* disabled */:
        field.get(&disabled_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string EnableTracingResponse::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> EnableTracingResponse::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void EnableTracingResponse::Serialize(::protozero::Message* msg) const {
  // Field 1: disabled
  if (_has_field_[1]) {
    msg->AppendTinyVarInt(1, disabled_);
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}

EnableTracingRequest::EnableTracingRequest() = default;
EnableTracingRequest::~EnableTracingRequest() = default;
EnableTracingRequest::EnableTracingRequest(const EnableTracingRequest&) = default;
EnableTracingRequest& EnableTracingRequest::operator=(const EnableTracingRequest&) = default;
EnableTracingRequest::EnableTracingRequest(EnableTracingRequest&&) noexcept = default;
EnableTracingRequest& EnableTracingRequest::operator=(EnableTracingRequest&&) = default;

bool EnableTracingRequest::operator==(const EnableTracingRequest& other) const {
  return unknown_fields_ == other.unknown_fields_
   && trace_config_ == other.trace_config_
   && attach_notification_only_ == other.attach_notification_only_;
}

bool EnableTracingRequest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* trace_config */:
        (*trace_config_).ParseFromString(field.as_std_string());
        break;
      case 2 /* attach_notification_only */:
        field.get(&attach_notification_only_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string EnableTracingRequest::SerializeAsString() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> EnableTracingRequest::SerializeAsArray() const {
  ::protozero::HeapBuffered<::protozero::Message> msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void EnableTracingRequest::Serialize(::protozero::Message* msg) const {
  // Field 1: trace_config
  if (_has_field_[1]) {
    (*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 2: attach_notification_only
  if (_has_field_[2]) {
    msg->AppendTinyVarInt(2, attach_notification_only_);
  }

  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
}
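
// Usage sketch (assumption: set_attach_notification_only() is the setter
// declared for field 2 in the corresponding consumer_port .gen.h, which is not
// reproduced here): the ParseFromArray()/Serialize() pair above gives a
// lossless round trip, including fields this version does not know about.
//
//   EnableTracingRequest req;
//   req.set_attach_notification_only(true);
//   std::string bytes = req.SerializeAsString();
//
//   EnableTracingRequest copy;
//   if (copy.ParseFromArray(bytes.data(), bytes.size())) {
//     // copy == req: operator== compares the known fields plus the preserved
//     // unknown_fields_ bytes.
//   }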

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
#pragma GCC diagnostic pop
// gen_amalgamated begin source: gen/protos/perfetto/ipc/producer_port.gen.cc
// gen_amalgamated begin header: gen/protos/perfetto/ipc/producer_port.gen.h
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_CPP_H_
#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_CPP_H_

#include <stdint.h>
#include <bitset>
#include <vector>
#include <string>
#include <type_traits>

// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
// gen_amalgamated expanded: #include "perfetto/base/export.h"

namespace perfetto {
namespace protos {
namespace gen {
class SyncResponse;
class SyncRequest;
class GetAsyncCommandResponse;
class GetAsyncCommandResponse_ClearIncrementalState;
class GetAsyncCommandResponse_Flush;
class GetAsyncCommandResponse_StopDataSource;
class GetAsyncCommandResponse_StartDataSource;
class DataSourceConfig;
class TestConfig;
class TestConfig_DummyFields;
class ChromeConfig;
class GetAsyncCommandResponse_SetupDataSource;
class GetAsyncCommandResponse_SetupTracing;
class GetAsyncCommandRequest;
class ActivateTriggersResponse;
class ActivateTriggersRequest;
class NotifyDataSourceStoppedResponse;
class NotifyDataSourceStoppedRequest;
class NotifyDataSourceStartedResponse;
class NotifyDataSourceStartedRequest;
class CommitDataResponse;
class UnregisterTraceWriterResponse;
class UnregisterTraceWriterRequest;
class RegisterTraceWriterResponse;
class RegisterTraceWriterRequest;
class UnregisterDataSourceResponse;
class UnregisterDataSourceRequest;
class RegisterDataSourceResponse;
class RegisterDataSourceRequest;
class DataSourceDescriptor;
class InitializeConnectionResponse;
class InitializeConnectionRequest;
enum InitializeConnectionRequest_ProducerSMBScrapingMode : int;
enum InitializeConnectionRequest_ProducerBuildFlags : int;
}  // namespace gen
}  // namespace protos
}  // namespace perfetto

namespace protozero {
class Message;
}  // namespace protozero

namespace perfetto {
namespace protos {
namespace gen {
enum InitializeConnectionRequest_ProducerSMBScrapingMode : int {
  InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_UNSPECIFIED = 0,
  InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_ENABLED = 1,
  InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_DISABLED = 2,
};
enum InitializeConnectionRequest_ProducerBuildFlags : int {
  InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_UNSPECIFIED = 0,
  InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_ON = 1,
  InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_OFF = 2,
};
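
// Usage sketch: these are plain int-backed enums, so a value decoded from the
// wire should be range-checked before being treated as an enumerator. The
// bounds below are the first and last enumerators declared just above.
//
//   int raw = 1;  // hypothetical value read from a raw decoder
//   if (raw >= InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_UNSPECIFIED &&
//       raw <= InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_DISABLED) {
//     auto mode =
//         static_cast<InitializeConnectionRequest_ProducerSMBScrapingMode>(raw);
//     (void)mode;
//   }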
|
|
|
|
class PERFETTO_EXPORT SyncResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
SyncResponse();
|
|
~SyncResponse() override;
|
|
SyncResponse(SyncResponse&&) noexcept;
|
|
SyncResponse& operator=(SyncResponse&&);
|
|
SyncResponse(const SyncResponse&);
|
|
SyncResponse& operator=(const SyncResponse&);
|
|
bool operator==(const SyncResponse&) const;
|
|
bool operator!=(const SyncResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT SyncRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
SyncRequest();
|
|
~SyncRequest() override;
|
|
SyncRequest(SyncRequest&&) noexcept;
|
|
SyncRequest& operator=(SyncRequest&&);
|
|
SyncRequest(const SyncRequest&);
|
|
SyncRequest& operator=(const SyncRequest&);
|
|
bool operator==(const SyncRequest&) const;
|
|
bool operator!=(const SyncRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT GetAsyncCommandResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
using SetupDataSource = GetAsyncCommandResponse_SetupDataSource;
|
|
using StartDataSource = GetAsyncCommandResponse_StartDataSource;
|
|
using StopDataSource = GetAsyncCommandResponse_StopDataSource;
|
|
using SetupTracing = GetAsyncCommandResponse_SetupTracing;
|
|
using Flush = GetAsyncCommandResponse_Flush;
|
|
using ClearIncrementalState = GetAsyncCommandResponse_ClearIncrementalState;
|
|
enum FieldNumbers {
|
|
kSetupTracingFieldNumber = 3,
|
|
kSetupDataSourceFieldNumber = 6,
|
|
kStartDataSourceFieldNumber = 1,
|
|
kStopDataSourceFieldNumber = 2,
|
|
kFlushFieldNumber = 5,
|
|
kClearIncrementalStateFieldNumber = 7,
|
|
};
|
|
|
|
GetAsyncCommandResponse();
|
|
~GetAsyncCommandResponse() override;
|
|
GetAsyncCommandResponse(GetAsyncCommandResponse&&) noexcept;
|
|
GetAsyncCommandResponse& operator=(GetAsyncCommandResponse&&);
|
|
GetAsyncCommandResponse(const GetAsyncCommandResponse&);
|
|
GetAsyncCommandResponse& operator=(const GetAsyncCommandResponse&);
|
|
bool operator==(const GetAsyncCommandResponse&) const;
|
|
bool operator!=(const GetAsyncCommandResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_setup_tracing() const { return _has_field_[3]; }
|
|
const GetAsyncCommandResponse_SetupTracing& setup_tracing() const { return *setup_tracing_; }
|
|
GetAsyncCommandResponse_SetupTracing* mutable_setup_tracing() { _has_field_.set(3); return setup_tracing_.get(); }
|
|
|
|
bool has_setup_data_source() const { return _has_field_[6]; }
|
|
const GetAsyncCommandResponse_SetupDataSource& setup_data_source() const { return *setup_data_source_; }
|
|
GetAsyncCommandResponse_SetupDataSource* mutable_setup_data_source() { _has_field_.set(6); return setup_data_source_.get(); }
|
|
|
|
bool has_start_data_source() const { return _has_field_[1]; }
|
|
const GetAsyncCommandResponse_StartDataSource& start_data_source() const { return *start_data_source_; }
|
|
GetAsyncCommandResponse_StartDataSource* mutable_start_data_source() { _has_field_.set(1); return start_data_source_.get(); }
|
|
|
|
bool has_stop_data_source() const { return _has_field_[2]; }
|
|
const GetAsyncCommandResponse_StopDataSource& stop_data_source() const { return *stop_data_source_; }
|
|
GetAsyncCommandResponse_StopDataSource* mutable_stop_data_source() { _has_field_.set(2); return stop_data_source_.get(); }
|
|
|
|
bool has_flush() const { return _has_field_[5]; }
|
|
const GetAsyncCommandResponse_Flush& flush() const { return *flush_; }
|
|
GetAsyncCommandResponse_Flush* mutable_flush() { _has_field_.set(5); return flush_.get(); }
|
|
|
|
bool has_clear_incremental_state() const { return _has_field_[7]; }
|
|
const GetAsyncCommandResponse_ClearIncrementalState& clear_incremental_state() const { return *clear_incremental_state_; }
|
|
GetAsyncCommandResponse_ClearIncrementalState* mutable_clear_incremental_state() { _has_field_.set(7); return clear_incremental_state_.get(); }
|
|
|
|
private:
|
|
::protozero::CopyablePtr<GetAsyncCommandResponse_SetupTracing> setup_tracing_;
|
|
::protozero::CopyablePtr<GetAsyncCommandResponse_SetupDataSource> setup_data_source_;
|
|
::protozero::CopyablePtr<GetAsyncCommandResponse_StartDataSource> start_data_source_;
|
|
::protozero::CopyablePtr<GetAsyncCommandResponse_StopDataSource> stop_data_source_;
|
|
::protozero::CopyablePtr<GetAsyncCommandResponse_Flush> flush_;
|
|
::protozero::CopyablePtr<GetAsyncCommandResponse_ClearIncrementalState> clear_incremental_state_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<8> _has_field_{};
|
|
};
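
// Usage sketch for the has_/mutable_ accessor pattern above: mutable_*() both
// marks the field as set in _has_field_ and exposes the nested message, so a
// command can be built in place and then serialized.
//
//   GetAsyncCommandResponse cmd;
//   cmd.mutable_flush()->set_request_id(42);
//   cmd.mutable_flush()->add_data_source_ids(1);
//   // cmd.has_flush() is now true; the other sub-commands remain unset.
//   std::vector<uint8_t> wire = cmd.SerializeAsArray();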
|
|
|
|
|
|
class PERFETTO_EXPORT GetAsyncCommandResponse_ClearIncrementalState : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kDataSourceIdsFieldNumber = 1,
|
|
};
|
|
|
|
GetAsyncCommandResponse_ClearIncrementalState();
|
|
~GetAsyncCommandResponse_ClearIncrementalState() override;
|
|
GetAsyncCommandResponse_ClearIncrementalState(GetAsyncCommandResponse_ClearIncrementalState&&) noexcept;
|
|
GetAsyncCommandResponse_ClearIncrementalState& operator=(GetAsyncCommandResponse_ClearIncrementalState&&);
|
|
GetAsyncCommandResponse_ClearIncrementalState(const GetAsyncCommandResponse_ClearIncrementalState&);
|
|
GetAsyncCommandResponse_ClearIncrementalState& operator=(const GetAsyncCommandResponse_ClearIncrementalState&);
|
|
bool operator==(const GetAsyncCommandResponse_ClearIncrementalState&) const;
|
|
bool operator!=(const GetAsyncCommandResponse_ClearIncrementalState& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int data_source_ids_size() const { return static_cast<int>(data_source_ids_.size()); }
|
|
const std::vector<uint64_t>& data_source_ids() const { return data_source_ids_; }
|
|
std::vector<uint64_t>* mutable_data_source_ids() { return &data_source_ids_; }
|
|
void clear_data_source_ids() { data_source_ids_.clear(); }
|
|
void add_data_source_ids(uint64_t value) { data_source_ids_.emplace_back(value); }
|
|
uint64_t* add_data_source_ids() { data_source_ids_.emplace_back(); return &data_source_ids_.back(); }
|
|
|
|
private:
|
|
std::vector<uint64_t> data_source_ids_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT GetAsyncCommandResponse_Flush : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kDataSourceIdsFieldNumber = 1,
|
|
kRequestIdFieldNumber = 2,
|
|
};
|
|
|
|
GetAsyncCommandResponse_Flush();
|
|
~GetAsyncCommandResponse_Flush() override;
|
|
GetAsyncCommandResponse_Flush(GetAsyncCommandResponse_Flush&&) noexcept;
|
|
GetAsyncCommandResponse_Flush& operator=(GetAsyncCommandResponse_Flush&&);
|
|
GetAsyncCommandResponse_Flush(const GetAsyncCommandResponse_Flush&);
|
|
GetAsyncCommandResponse_Flush& operator=(const GetAsyncCommandResponse_Flush&);
|
|
bool operator==(const GetAsyncCommandResponse_Flush&) const;
|
|
bool operator!=(const GetAsyncCommandResponse_Flush& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int data_source_ids_size() const { return static_cast<int>(data_source_ids_.size()); }
|
|
const std::vector<uint64_t>& data_source_ids() const { return data_source_ids_; }
|
|
std::vector<uint64_t>* mutable_data_source_ids() { return &data_source_ids_; }
|
|
void clear_data_source_ids() { data_source_ids_.clear(); }
|
|
void add_data_source_ids(uint64_t value) { data_source_ids_.emplace_back(value); }
|
|
uint64_t* add_data_source_ids() { data_source_ids_.emplace_back(); return &data_source_ids_.back(); }
|
|
|
|
bool has_request_id() const { return _has_field_[2]; }
|
|
uint64_t request_id() const { return request_id_; }
|
|
void set_request_id(uint64_t value) { request_id_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
std::vector<uint64_t> data_source_ids_;
|
|
uint64_t request_id_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
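
// Usage sketch for the repeated-field accessors above: add_data_source_ids()
// appends one element per call and data_source_ids_size() mirrors the size of
// the backing vector.
//
//   GetAsyncCommandResponse_Flush flush;
//   flush.set_request_id(7);
//   for (uint64_t id : {1ull, 2ull, 3ull})
//     flush.add_data_source_ids(id);
//   // flush.data_source_ids_size() == 3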
|
|
|
|
|
|
class PERFETTO_EXPORT GetAsyncCommandResponse_StopDataSource : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kInstanceIdFieldNumber = 1,
|
|
};
|
|
|
|
GetAsyncCommandResponse_StopDataSource();
|
|
~GetAsyncCommandResponse_StopDataSource() override;
|
|
GetAsyncCommandResponse_StopDataSource(GetAsyncCommandResponse_StopDataSource&&) noexcept;
|
|
GetAsyncCommandResponse_StopDataSource& operator=(GetAsyncCommandResponse_StopDataSource&&);
|
|
GetAsyncCommandResponse_StopDataSource(const GetAsyncCommandResponse_StopDataSource&);
|
|
GetAsyncCommandResponse_StopDataSource& operator=(const GetAsyncCommandResponse_StopDataSource&);
|
|
bool operator==(const GetAsyncCommandResponse_StopDataSource&) const;
|
|
bool operator!=(const GetAsyncCommandResponse_StopDataSource& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_instance_id() const { return _has_field_[1]; }
|
|
uint64_t instance_id() const { return instance_id_; }
|
|
void set_instance_id(uint64_t value) { instance_id_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
uint64_t instance_id_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT GetAsyncCommandResponse_StartDataSource : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kNewInstanceIdFieldNumber = 1,
|
|
kConfigFieldNumber = 2,
|
|
};
|
|
|
|
GetAsyncCommandResponse_StartDataSource();
|
|
~GetAsyncCommandResponse_StartDataSource() override;
|
|
GetAsyncCommandResponse_StartDataSource(GetAsyncCommandResponse_StartDataSource&&) noexcept;
|
|
GetAsyncCommandResponse_StartDataSource& operator=(GetAsyncCommandResponse_StartDataSource&&);
|
|
GetAsyncCommandResponse_StartDataSource(const GetAsyncCommandResponse_StartDataSource&);
|
|
GetAsyncCommandResponse_StartDataSource& operator=(const GetAsyncCommandResponse_StartDataSource&);
|
|
bool operator==(const GetAsyncCommandResponse_StartDataSource&) const;
|
|
bool operator!=(const GetAsyncCommandResponse_StartDataSource& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_new_instance_id() const { return _has_field_[1]; }
|
|
uint64_t new_instance_id() const { return new_instance_id_; }
|
|
void set_new_instance_id(uint64_t value) { new_instance_id_ = value; _has_field_.set(1); }
|
|
|
|
bool has_config() const { return _has_field_[2]; }
|
|
const DataSourceConfig& config() const { return *config_; }
|
|
DataSourceConfig* mutable_config() { _has_field_.set(2); return config_.get(); }
|
|
|
|
private:
|
|
uint64_t new_instance_id_{};
|
|
::protozero::CopyablePtr<DataSourceConfig> config_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT GetAsyncCommandResponse_SetupDataSource : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kNewInstanceIdFieldNumber = 1,
|
|
kConfigFieldNumber = 2,
|
|
};
|
|
|
|
GetAsyncCommandResponse_SetupDataSource();
|
|
~GetAsyncCommandResponse_SetupDataSource() override;
|
|
GetAsyncCommandResponse_SetupDataSource(GetAsyncCommandResponse_SetupDataSource&&) noexcept;
|
|
GetAsyncCommandResponse_SetupDataSource& operator=(GetAsyncCommandResponse_SetupDataSource&&);
|
|
GetAsyncCommandResponse_SetupDataSource(const GetAsyncCommandResponse_SetupDataSource&);
|
|
GetAsyncCommandResponse_SetupDataSource& operator=(const GetAsyncCommandResponse_SetupDataSource&);
|
|
bool operator==(const GetAsyncCommandResponse_SetupDataSource&) const;
|
|
bool operator!=(const GetAsyncCommandResponse_SetupDataSource& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_new_instance_id() const { return _has_field_[1]; }
|
|
uint64_t new_instance_id() const { return new_instance_id_; }
|
|
void set_new_instance_id(uint64_t value) { new_instance_id_ = value; _has_field_.set(1); }
|
|
|
|
bool has_config() const { return _has_field_[2]; }
|
|
const DataSourceConfig& config() const { return *config_; }
|
|
DataSourceConfig* mutable_config() { _has_field_.set(2); return config_.get(); }
|
|
|
|
private:
|
|
uint64_t new_instance_id_{};
|
|
::protozero::CopyablePtr<DataSourceConfig> config_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT GetAsyncCommandResponse_SetupTracing : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kSharedBufferPageSizeKbFieldNumber = 1,
|
|
};
|
|
|
|
GetAsyncCommandResponse_SetupTracing();
|
|
~GetAsyncCommandResponse_SetupTracing() override;
|
|
GetAsyncCommandResponse_SetupTracing(GetAsyncCommandResponse_SetupTracing&&) noexcept;
|
|
GetAsyncCommandResponse_SetupTracing& operator=(GetAsyncCommandResponse_SetupTracing&&);
|
|
GetAsyncCommandResponse_SetupTracing(const GetAsyncCommandResponse_SetupTracing&);
|
|
GetAsyncCommandResponse_SetupTracing& operator=(const GetAsyncCommandResponse_SetupTracing&);
|
|
bool operator==(const GetAsyncCommandResponse_SetupTracing&) const;
|
|
bool operator!=(const GetAsyncCommandResponse_SetupTracing& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_shared_buffer_page_size_kb() const { return _has_field_[1]; }
|
|
uint32_t shared_buffer_page_size_kb() const { return shared_buffer_page_size_kb_; }
|
|
void set_shared_buffer_page_size_kb(uint32_t value) { shared_buffer_page_size_kb_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
uint32_t shared_buffer_page_size_kb_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT GetAsyncCommandRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
GetAsyncCommandRequest();
|
|
~GetAsyncCommandRequest() override;
|
|
GetAsyncCommandRequest(GetAsyncCommandRequest&&) noexcept;
|
|
GetAsyncCommandRequest& operator=(GetAsyncCommandRequest&&);
|
|
GetAsyncCommandRequest(const GetAsyncCommandRequest&);
|
|
GetAsyncCommandRequest& operator=(const GetAsyncCommandRequest&);
|
|
bool operator==(const GetAsyncCommandRequest&) const;
|
|
bool operator!=(const GetAsyncCommandRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ActivateTriggersResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
ActivateTriggersResponse();
|
|
~ActivateTriggersResponse() override;
|
|
ActivateTriggersResponse(ActivateTriggersResponse&&) noexcept;
|
|
ActivateTriggersResponse& operator=(ActivateTriggersResponse&&);
|
|
ActivateTriggersResponse(const ActivateTriggersResponse&);
|
|
ActivateTriggersResponse& operator=(const ActivateTriggersResponse&);
|
|
bool operator==(const ActivateTriggersResponse&) const;
|
|
bool operator!=(const ActivateTriggersResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT ActivateTriggersRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kTriggerNamesFieldNumber = 1,
|
|
};
|
|
|
|
ActivateTriggersRequest();
|
|
~ActivateTriggersRequest() override;
|
|
ActivateTriggersRequest(ActivateTriggersRequest&&) noexcept;
|
|
ActivateTriggersRequest& operator=(ActivateTriggersRequest&&);
|
|
ActivateTriggersRequest(const ActivateTriggersRequest&);
|
|
ActivateTriggersRequest& operator=(const ActivateTriggersRequest&);
|
|
bool operator==(const ActivateTriggersRequest&) const;
|
|
bool operator!=(const ActivateTriggersRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
int trigger_names_size() const { return static_cast<int>(trigger_names_.size()); }
|
|
const std::vector<std::string>& trigger_names() const { return trigger_names_; }
|
|
std::vector<std::string>* mutable_trigger_names() { return &trigger_names_; }
|
|
void clear_trigger_names() { trigger_names_.clear(); }
|
|
void add_trigger_names(std::string value) { trigger_names_.emplace_back(value); }
|
|
std::string* add_trigger_names() { trigger_names_.emplace_back(); return &trigger_names_.back(); }
|
|
|
|
private:
|
|
std::vector<std::string> trigger_names_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT NotifyDataSourceStoppedResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
NotifyDataSourceStoppedResponse();
|
|
~NotifyDataSourceStoppedResponse() override;
|
|
NotifyDataSourceStoppedResponse(NotifyDataSourceStoppedResponse&&) noexcept;
|
|
NotifyDataSourceStoppedResponse& operator=(NotifyDataSourceStoppedResponse&&);
|
|
NotifyDataSourceStoppedResponse(const NotifyDataSourceStoppedResponse&);
|
|
NotifyDataSourceStoppedResponse& operator=(const NotifyDataSourceStoppedResponse&);
|
|
bool operator==(const NotifyDataSourceStoppedResponse&) const;
|
|
bool operator!=(const NotifyDataSourceStoppedResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT NotifyDataSourceStoppedRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kDataSourceIdFieldNumber = 1,
|
|
};
|
|
|
|
NotifyDataSourceStoppedRequest();
|
|
~NotifyDataSourceStoppedRequest() override;
|
|
NotifyDataSourceStoppedRequest(NotifyDataSourceStoppedRequest&&) noexcept;
|
|
NotifyDataSourceStoppedRequest& operator=(NotifyDataSourceStoppedRequest&&);
|
|
NotifyDataSourceStoppedRequest(const NotifyDataSourceStoppedRequest&);
|
|
NotifyDataSourceStoppedRequest& operator=(const NotifyDataSourceStoppedRequest&);
|
|
bool operator==(const NotifyDataSourceStoppedRequest&) const;
|
|
bool operator!=(const NotifyDataSourceStoppedRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_data_source_id() const { return _has_field_[1]; }
|
|
uint64_t data_source_id() const { return data_source_id_; }
|
|
void set_data_source_id(uint64_t value) { data_source_id_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
uint64_t data_source_id_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT NotifyDataSourceStartedResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
NotifyDataSourceStartedResponse();
|
|
~NotifyDataSourceStartedResponse() override;
|
|
NotifyDataSourceStartedResponse(NotifyDataSourceStartedResponse&&) noexcept;
|
|
NotifyDataSourceStartedResponse& operator=(NotifyDataSourceStartedResponse&&);
|
|
NotifyDataSourceStartedResponse(const NotifyDataSourceStartedResponse&);
|
|
NotifyDataSourceStartedResponse& operator=(const NotifyDataSourceStartedResponse&);
|
|
bool operator==(const NotifyDataSourceStartedResponse&) const;
|
|
bool operator!=(const NotifyDataSourceStartedResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT NotifyDataSourceStartedRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kDataSourceIdFieldNumber = 1,
|
|
};
|
|
|
|
NotifyDataSourceStartedRequest();
|
|
~NotifyDataSourceStartedRequest() override;
|
|
NotifyDataSourceStartedRequest(NotifyDataSourceStartedRequest&&) noexcept;
|
|
NotifyDataSourceStartedRequest& operator=(NotifyDataSourceStartedRequest&&);
|
|
NotifyDataSourceStartedRequest(const NotifyDataSourceStartedRequest&);
|
|
NotifyDataSourceStartedRequest& operator=(const NotifyDataSourceStartedRequest&);
|
|
bool operator==(const NotifyDataSourceStartedRequest&) const;
|
|
bool operator!=(const NotifyDataSourceStartedRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_data_source_id() const { return _has_field_[1]; }
|
|
uint64_t data_source_id() const { return data_source_id_; }
|
|
void set_data_source_id(uint64_t value) { data_source_id_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
uint64_t data_source_id_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT CommitDataResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
CommitDataResponse();
|
|
~CommitDataResponse() override;
|
|
CommitDataResponse(CommitDataResponse&&) noexcept;
|
|
CommitDataResponse& operator=(CommitDataResponse&&);
|
|
CommitDataResponse(const CommitDataResponse&);
|
|
CommitDataResponse& operator=(const CommitDataResponse&);
|
|
bool operator==(const CommitDataResponse&) const;
|
|
bool operator!=(const CommitDataResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT UnregisterTraceWriterResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
UnregisterTraceWriterResponse();
|
|
~UnregisterTraceWriterResponse() override;
|
|
UnregisterTraceWriterResponse(UnregisterTraceWriterResponse&&) noexcept;
|
|
UnregisterTraceWriterResponse& operator=(UnregisterTraceWriterResponse&&);
|
|
UnregisterTraceWriterResponse(const UnregisterTraceWriterResponse&);
|
|
UnregisterTraceWriterResponse& operator=(const UnregisterTraceWriterResponse&);
|
|
bool operator==(const UnregisterTraceWriterResponse&) const;
|
|
bool operator!=(const UnregisterTraceWriterResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT UnregisterTraceWriterRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kTraceWriterIdFieldNumber = 1,
|
|
};
|
|
|
|
UnregisterTraceWriterRequest();
|
|
~UnregisterTraceWriterRequest() override;
|
|
UnregisterTraceWriterRequest(UnregisterTraceWriterRequest&&) noexcept;
|
|
UnregisterTraceWriterRequest& operator=(UnregisterTraceWriterRequest&&);
|
|
UnregisterTraceWriterRequest(const UnregisterTraceWriterRequest&);
|
|
UnregisterTraceWriterRequest& operator=(const UnregisterTraceWriterRequest&);
|
|
bool operator==(const UnregisterTraceWriterRequest&) const;
|
|
bool operator!=(const UnregisterTraceWriterRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_trace_writer_id() const { return _has_field_[1]; }
|
|
uint32_t trace_writer_id() const { return trace_writer_id_; }
|
|
void set_trace_writer_id(uint32_t value) { trace_writer_id_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
uint32_t trace_writer_id_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT RegisterTraceWriterResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
RegisterTraceWriterResponse();
|
|
~RegisterTraceWriterResponse() override;
|
|
RegisterTraceWriterResponse(RegisterTraceWriterResponse&&) noexcept;
|
|
RegisterTraceWriterResponse& operator=(RegisterTraceWriterResponse&&);
|
|
RegisterTraceWriterResponse(const RegisterTraceWriterResponse&);
|
|
RegisterTraceWriterResponse& operator=(const RegisterTraceWriterResponse&);
|
|
bool operator==(const RegisterTraceWriterResponse&) const;
|
|
bool operator!=(const RegisterTraceWriterResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT RegisterTraceWriterRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kTraceWriterIdFieldNumber = 1,
|
|
kTargetBufferFieldNumber = 2,
|
|
};
|
|
|
|
RegisterTraceWriterRequest();
|
|
~RegisterTraceWriterRequest() override;
|
|
RegisterTraceWriterRequest(RegisterTraceWriterRequest&&) noexcept;
|
|
RegisterTraceWriterRequest& operator=(RegisterTraceWriterRequest&&);
|
|
RegisterTraceWriterRequest(const RegisterTraceWriterRequest&);
|
|
RegisterTraceWriterRequest& operator=(const RegisterTraceWriterRequest&);
|
|
bool operator==(const RegisterTraceWriterRequest&) const;
|
|
bool operator!=(const RegisterTraceWriterRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_trace_writer_id() const { return _has_field_[1]; }
|
|
uint32_t trace_writer_id() const { return trace_writer_id_; }
|
|
void set_trace_writer_id(uint32_t value) { trace_writer_id_ = value; _has_field_.set(1); }
|
|
|
|
bool has_target_buffer() const { return _has_field_[2]; }
|
|
uint32_t target_buffer() const { return target_buffer_; }
|
|
void set_target_buffer(uint32_t value) { target_buffer_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
uint32_t trace_writer_id_{};
|
|
uint32_t target_buffer_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT UnregisterDataSourceResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
};
|
|
|
|
UnregisterDataSourceResponse();
|
|
~UnregisterDataSourceResponse() override;
|
|
UnregisterDataSourceResponse(UnregisterDataSourceResponse&&) noexcept;
|
|
UnregisterDataSourceResponse& operator=(UnregisterDataSourceResponse&&);
|
|
UnregisterDataSourceResponse(const UnregisterDataSourceResponse&);
|
|
UnregisterDataSourceResponse& operator=(const UnregisterDataSourceResponse&);
|
|
bool operator==(const UnregisterDataSourceResponse&) const;
|
|
bool operator!=(const UnregisterDataSourceResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
private:
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT UnregisterDataSourceRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kDataSourceNameFieldNumber = 1,
|
|
};
|
|
|
|
UnregisterDataSourceRequest();
|
|
~UnregisterDataSourceRequest() override;
|
|
UnregisterDataSourceRequest(UnregisterDataSourceRequest&&) noexcept;
|
|
UnregisterDataSourceRequest& operator=(UnregisterDataSourceRequest&&);
|
|
UnregisterDataSourceRequest(const UnregisterDataSourceRequest&);
|
|
UnregisterDataSourceRequest& operator=(const UnregisterDataSourceRequest&);
|
|
bool operator==(const UnregisterDataSourceRequest&) const;
|
|
bool operator!=(const UnregisterDataSourceRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_data_source_name() const { return _has_field_[1]; }
|
|
const std::string& data_source_name() const { return data_source_name_; }
|
|
void set_data_source_name(const std::string& value) { data_source_name_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
std::string data_source_name_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT RegisterDataSourceResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kErrorFieldNumber = 1,
|
|
};
|
|
|
|
RegisterDataSourceResponse();
|
|
~RegisterDataSourceResponse() override;
|
|
RegisterDataSourceResponse(RegisterDataSourceResponse&&) noexcept;
|
|
RegisterDataSourceResponse& operator=(RegisterDataSourceResponse&&);
|
|
RegisterDataSourceResponse(const RegisterDataSourceResponse&);
|
|
RegisterDataSourceResponse& operator=(const RegisterDataSourceResponse&);
|
|
bool operator==(const RegisterDataSourceResponse&) const;
|
|
bool operator!=(const RegisterDataSourceResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_error() const { return _has_field_[1]; }
|
|
const std::string& error() const { return error_; }
|
|
void set_error(const std::string& value) { error_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
std::string error_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT RegisterDataSourceRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kDataSourceDescriptorFieldNumber = 1,
|
|
};
|
|
|
|
RegisterDataSourceRequest();
|
|
~RegisterDataSourceRequest() override;
|
|
RegisterDataSourceRequest(RegisterDataSourceRequest&&) noexcept;
|
|
RegisterDataSourceRequest& operator=(RegisterDataSourceRequest&&);
|
|
RegisterDataSourceRequest(const RegisterDataSourceRequest&);
|
|
RegisterDataSourceRequest& operator=(const RegisterDataSourceRequest&);
|
|
bool operator==(const RegisterDataSourceRequest&) const;
|
|
bool operator!=(const RegisterDataSourceRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_data_source_descriptor() const { return _has_field_[1]; }
|
|
const DataSourceDescriptor& data_source_descriptor() const { return *data_source_descriptor_; }
|
|
DataSourceDescriptor* mutable_data_source_descriptor() { _has_field_.set(1); return data_source_descriptor_.get(); }
|
|
|
|
private:
|
|
::protozero::CopyablePtr<DataSourceDescriptor> data_source_descriptor_;
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT InitializeConnectionResponse : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kUsingShmemProvidedByProducerFieldNumber = 1,
|
|
};
|
|
|
|
InitializeConnectionResponse();
|
|
~InitializeConnectionResponse() override;
|
|
InitializeConnectionResponse(InitializeConnectionResponse&&) noexcept;
|
|
InitializeConnectionResponse& operator=(InitializeConnectionResponse&&);
|
|
InitializeConnectionResponse(const InitializeConnectionResponse&);
|
|
InitializeConnectionResponse& operator=(const InitializeConnectionResponse&);
|
|
bool operator==(const InitializeConnectionResponse&) const;
|
|
bool operator!=(const InitializeConnectionResponse& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_using_shmem_provided_by_producer() const { return _has_field_[1]; }
|
|
bool using_shmem_provided_by_producer() const { return using_shmem_provided_by_producer_; }
|
|
void set_using_shmem_provided_by_producer(bool value) { using_shmem_provided_by_producer_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
bool using_shmem_provided_by_producer_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT InitializeConnectionRequest : public ::protozero::CppMessageObj {
|
|
public:
|
|
using ProducerSMBScrapingMode = InitializeConnectionRequest_ProducerSMBScrapingMode;
|
|
static constexpr auto SMB_SCRAPING_UNSPECIFIED = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_UNSPECIFIED;
|
|
static constexpr auto SMB_SCRAPING_ENABLED = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_ENABLED;
|
|
static constexpr auto SMB_SCRAPING_DISABLED = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_DISABLED;
|
|
static constexpr auto ProducerSMBScrapingMode_MIN = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_UNSPECIFIED;
|
|
static constexpr auto ProducerSMBScrapingMode_MAX = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_DISABLED;
|
|
using ProducerBuildFlags = InitializeConnectionRequest_ProducerBuildFlags;
|
|
static constexpr auto BUILD_FLAGS_UNSPECIFIED = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_UNSPECIFIED;
|
|
static constexpr auto BUILD_FLAGS_DCHECKS_ON = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_ON;
|
|
static constexpr auto BUILD_FLAGS_DCHECKS_OFF = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_OFF;
|
|
static constexpr auto ProducerBuildFlags_MIN = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_UNSPECIFIED;
|
|
static constexpr auto ProducerBuildFlags_MAX = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_OFF;
|
|
enum FieldNumbers {
|
|
kSharedMemoryPageSizeHintBytesFieldNumber = 1,
|
|
kSharedMemorySizeHintBytesFieldNumber = 2,
|
|
kProducerNameFieldNumber = 3,
|
|
kSmbScrapingModeFieldNumber = 4,
|
|
kBuildFlagsFieldNumber = 5,
|
|
kProducerProvidedShmemFieldNumber = 6,
|
|
};
|
|
|
|
InitializeConnectionRequest();
|
|
~InitializeConnectionRequest() override;
|
|
InitializeConnectionRequest(InitializeConnectionRequest&&) noexcept;
|
|
InitializeConnectionRequest& operator=(InitializeConnectionRequest&&);
|
|
InitializeConnectionRequest(const InitializeConnectionRequest&);
|
|
InitializeConnectionRequest& operator=(const InitializeConnectionRequest&);
|
|
bool operator==(const InitializeConnectionRequest&) const;
|
|
bool operator!=(const InitializeConnectionRequest& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_shared_memory_page_size_hint_bytes() const { return _has_field_[1]; }
|
|
uint32_t shared_memory_page_size_hint_bytes() const { return shared_memory_page_size_hint_bytes_; }
|
|
void set_shared_memory_page_size_hint_bytes(uint32_t value) { shared_memory_page_size_hint_bytes_ = value; _has_field_.set(1); }
|
|
|
|
bool has_shared_memory_size_hint_bytes() const { return _has_field_[2]; }
|
|
uint32_t shared_memory_size_hint_bytes() const { return shared_memory_size_hint_bytes_; }
|
|
void set_shared_memory_size_hint_bytes(uint32_t value) { shared_memory_size_hint_bytes_ = value; _has_field_.set(2); }
|
|
|
|
bool has_producer_name() const { return _has_field_[3]; }
|
|
const std::string& producer_name() const { return producer_name_; }
|
|
void set_producer_name(const std::string& value) { producer_name_ = value; _has_field_.set(3); }
|
|
|
|
bool has_smb_scraping_mode() const { return _has_field_[4]; }
|
|
InitializeConnectionRequest_ProducerSMBScrapingMode smb_scraping_mode() const { return smb_scraping_mode_; }
|
|
void set_smb_scraping_mode(InitializeConnectionRequest_ProducerSMBScrapingMode value) { smb_scraping_mode_ = value; _has_field_.set(4); }
|
|
|
|
bool has_build_flags() const { return _has_field_[5]; }
|
|
InitializeConnectionRequest_ProducerBuildFlags build_flags() const { return build_flags_; }
|
|
void set_build_flags(InitializeConnectionRequest_ProducerBuildFlags value) { build_flags_ = value; _has_field_.set(5); }
|
|
|
|
bool has_producer_provided_shmem() const { return _has_field_[6]; }
|
|
bool producer_provided_shmem() const { return producer_provided_shmem_; }
|
|
void set_producer_provided_shmem(bool value) { producer_provided_shmem_ = value; _has_field_.set(6); }
|
|
|
|
private:
|
|
uint32_t shared_memory_page_size_hint_bytes_{};
|
|
uint32_t shared_memory_size_hint_bytes_{};
|
|
std::string producer_name_{};
|
|
InitializeConnectionRequest_ProducerSMBScrapingMode smb_scraping_mode_{};
|
|
InitializeConnectionRequest_ProducerBuildFlags build_flags_{};
|
|
bool producer_provided_shmem_{};
|
|
|
|
// Allows to preserve unknown protobuf fields for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<7> _has_field_{};
|
|
};
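
// Usage sketch: a producer handshake message can be filled in through the
// setters above; the static constexpr aliases (e.g. SMB_SCRAPING_ENABLED)
// simply re-export the enum values declared at the top of this header. The
// producer name below is a hypothetical example value.
//
//   InitializeConnectionRequest req;
//   req.set_producer_name("com.example.my_producer");
//   req.set_shared_memory_size_hint_bytes(1024 * 1024);
//   req.set_smb_scraping_mode(InitializeConnectionRequest::SMB_SCRAPING_ENABLED);
//   std::string wire = req.SerializeAsString();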
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto

#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_CPP_H_
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
SyncResponse::SyncResponse() = default;
|
|
SyncResponse::~SyncResponse() = default;
|
|
SyncResponse::SyncResponse(const SyncResponse&) = default;
|
|
SyncResponse& SyncResponse::operator=(const SyncResponse&) = default;
|
|
SyncResponse::SyncResponse(SyncResponse&&) noexcept = default;
|
|
SyncResponse& SyncResponse::operator=(SyncResponse&&) = default;
|
|
|
|
bool SyncResponse::operator==(const SyncResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool SyncResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string SyncResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> SyncResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void SyncResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
SyncRequest::SyncRequest() = default;
|
|
SyncRequest::~SyncRequest() = default;
|
|
SyncRequest::SyncRequest(const SyncRequest&) = default;
|
|
SyncRequest& SyncRequest::operator=(const SyncRequest&) = default;
|
|
SyncRequest::SyncRequest(SyncRequest&&) noexcept = default;
|
|
SyncRequest& SyncRequest::operator=(SyncRequest&&) = default;
|
|
|
|
bool SyncRequest::operator==(const SyncRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool SyncRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string SyncRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> SyncRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void SyncRequest::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse::GetAsyncCommandResponse() = default;
|
|
GetAsyncCommandResponse::~GetAsyncCommandResponse() = default;
|
|
GetAsyncCommandResponse::GetAsyncCommandResponse(const GetAsyncCommandResponse&) = default;
|
|
GetAsyncCommandResponse& GetAsyncCommandResponse::operator=(const GetAsyncCommandResponse&) = default;
|
|
GetAsyncCommandResponse::GetAsyncCommandResponse(GetAsyncCommandResponse&&) noexcept = default;
|
|
GetAsyncCommandResponse& GetAsyncCommandResponse::operator=(GetAsyncCommandResponse&&) = default;
|
|
|
|
bool GetAsyncCommandResponse::operator==(const GetAsyncCommandResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& setup_tracing_ == other.setup_tracing_
|
|
&& setup_data_source_ == other.setup_data_source_
|
|
&& start_data_source_ == other.start_data_source_
|
|
&& stop_data_source_ == other.stop_data_source_
|
|
&& flush_ == other.flush_
|
|
&& clear_incremental_state_ == other.clear_incremental_state_;
|
|
}
|
|
|
|
bool GetAsyncCommandResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 3 /* setup_tracing */:
|
|
(*setup_tracing_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 6 /* setup_data_source */:
|
|
(*setup_data_source_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 1 /* start_data_source */:
|
|
(*start_data_source_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 2 /* stop_data_source */:
|
|
(*stop_data_source_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 5 /* flush */:
|
|
(*flush_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 7 /* clear_incremental_state */:
|
|
(*clear_incremental_state_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 3: setup_tracing
|
|
if (_has_field_[3]) {
|
|
(*setup_tracing_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
|
|
}
|
|
|
|
// Field 6: setup_data_source
|
|
if (_has_field_[6]) {
|
|
(*setup_data_source_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
// Field 1: start_data_source
|
|
if (_has_field_[1]) {
|
|
(*start_data_source_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: stop_data_source
|
|
if (_has_field_[2]) {
|
|
(*stop_data_source_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
// Field 5: flush
|
|
if (_has_field_[5]) {
|
|
(*flush_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
|
|
}
|
|
|
|
// Field 7: clear_incremental_state
|
|
if (_has_field_[7]) {
|
|
(*clear_incremental_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_ClearIncrementalState::GetAsyncCommandResponse_ClearIncrementalState() = default;
|
|
GetAsyncCommandResponse_ClearIncrementalState::~GetAsyncCommandResponse_ClearIncrementalState() = default;
|
|
GetAsyncCommandResponse_ClearIncrementalState::GetAsyncCommandResponse_ClearIncrementalState(const GetAsyncCommandResponse_ClearIncrementalState&) = default;
|
|
GetAsyncCommandResponse_ClearIncrementalState& GetAsyncCommandResponse_ClearIncrementalState::operator=(const GetAsyncCommandResponse_ClearIncrementalState&) = default;
|
|
GetAsyncCommandResponse_ClearIncrementalState::GetAsyncCommandResponse_ClearIncrementalState(GetAsyncCommandResponse_ClearIncrementalState&&) noexcept = default;
|
|
GetAsyncCommandResponse_ClearIncrementalState& GetAsyncCommandResponse_ClearIncrementalState::operator=(GetAsyncCommandResponse_ClearIncrementalState&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_ClearIncrementalState::operator==(const GetAsyncCommandResponse_ClearIncrementalState& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& data_source_ids_ == other.data_source_ids_;
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_ClearIncrementalState::ParseFromArray(const void* raw, size_t size) {
|
|
data_source_ids_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_ids */:
|
|
data_source_ids_.emplace_back();
|
|
field.get(&data_source_ids_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_ClearIncrementalState::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_ClearIncrementalState::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_ClearIncrementalState::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_ids
|
|
for (auto& it : data_source_ids_) {
|
|
msg->AppendVarInt(1, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_Flush::GetAsyncCommandResponse_Flush() = default;
|
|
GetAsyncCommandResponse_Flush::~GetAsyncCommandResponse_Flush() = default;
|
|
GetAsyncCommandResponse_Flush::GetAsyncCommandResponse_Flush(const GetAsyncCommandResponse_Flush&) = default;
|
|
GetAsyncCommandResponse_Flush& GetAsyncCommandResponse_Flush::operator=(const GetAsyncCommandResponse_Flush&) = default;
|
|
GetAsyncCommandResponse_Flush::GetAsyncCommandResponse_Flush(GetAsyncCommandResponse_Flush&&) noexcept = default;
|
|
GetAsyncCommandResponse_Flush& GetAsyncCommandResponse_Flush::operator=(GetAsyncCommandResponse_Flush&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_Flush::operator==(const GetAsyncCommandResponse_Flush& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& data_source_ids_ == other.data_source_ids_
|
|
&& request_id_ == other.request_id_;
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_Flush::ParseFromArray(const void* raw, size_t size) {
|
|
data_source_ids_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_ids */:
|
|
data_source_ids_.emplace_back();
|
|
field.get(&data_source_ids_.back());
|
|
break;
|
|
case 2 /* request_id */:
|
|
field.get(&request_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_Flush::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_Flush::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_Flush::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_ids
|
|
for (auto& it : data_source_ids_) {
|
|
msg->AppendVarInt(1, it);
|
|
}
|
|
|
|
// Field 2: request_id
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, request_id_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_StopDataSource::GetAsyncCommandResponse_StopDataSource() = default;
|
|
GetAsyncCommandResponse_StopDataSource::~GetAsyncCommandResponse_StopDataSource() = default;
|
|
GetAsyncCommandResponse_StopDataSource::GetAsyncCommandResponse_StopDataSource(const GetAsyncCommandResponse_StopDataSource&) = default;
|
|
GetAsyncCommandResponse_StopDataSource& GetAsyncCommandResponse_StopDataSource::operator=(const GetAsyncCommandResponse_StopDataSource&) = default;
|
|
GetAsyncCommandResponse_StopDataSource::GetAsyncCommandResponse_StopDataSource(GetAsyncCommandResponse_StopDataSource&&) noexcept = default;
|
|
GetAsyncCommandResponse_StopDataSource& GetAsyncCommandResponse_StopDataSource::operator=(GetAsyncCommandResponse_StopDataSource&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_StopDataSource::operator==(const GetAsyncCommandResponse_StopDataSource& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& instance_id_ == other.instance_id_;
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_StopDataSource::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* instance_id */:
|
|
field.get(&instance_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_StopDataSource::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_StopDataSource::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_StopDataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: instance_id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, instance_id_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_StartDataSource::GetAsyncCommandResponse_StartDataSource() = default;
|
|
GetAsyncCommandResponse_StartDataSource::~GetAsyncCommandResponse_StartDataSource() = default;
|
|
GetAsyncCommandResponse_StartDataSource::GetAsyncCommandResponse_StartDataSource(const GetAsyncCommandResponse_StartDataSource&) = default;
|
|
GetAsyncCommandResponse_StartDataSource& GetAsyncCommandResponse_StartDataSource::operator=(const GetAsyncCommandResponse_StartDataSource&) = default;
|
|
GetAsyncCommandResponse_StartDataSource::GetAsyncCommandResponse_StartDataSource(GetAsyncCommandResponse_StartDataSource&&) noexcept = default;
|
|
GetAsyncCommandResponse_StartDataSource& GetAsyncCommandResponse_StartDataSource::operator=(GetAsyncCommandResponse_StartDataSource&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_StartDataSource::operator==(const GetAsyncCommandResponse_StartDataSource& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& new_instance_id_ == other.new_instance_id_
|
|
&& config_ == other.config_;
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_StartDataSource::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* new_instance_id */:
|
|
field.get(&new_instance_id_);
|
|
break;
|
|
case 2 /* config */:
|
|
(*config_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_StartDataSource::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_StartDataSource::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_StartDataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: new_instance_id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, new_instance_id_);
|
|
}
|
|
|
|
// Field 2: config
|
|
if (_has_field_[2]) {
|
|
(*config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_SetupDataSource::GetAsyncCommandResponse_SetupDataSource() = default;
|
|
GetAsyncCommandResponse_SetupDataSource::~GetAsyncCommandResponse_SetupDataSource() = default;
|
|
GetAsyncCommandResponse_SetupDataSource::GetAsyncCommandResponse_SetupDataSource(const GetAsyncCommandResponse_SetupDataSource&) = default;
|
|
GetAsyncCommandResponse_SetupDataSource& GetAsyncCommandResponse_SetupDataSource::operator=(const GetAsyncCommandResponse_SetupDataSource&) = default;
|
|
GetAsyncCommandResponse_SetupDataSource::GetAsyncCommandResponse_SetupDataSource(GetAsyncCommandResponse_SetupDataSource&&) noexcept = default;
|
|
GetAsyncCommandResponse_SetupDataSource& GetAsyncCommandResponse_SetupDataSource::operator=(GetAsyncCommandResponse_SetupDataSource&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_SetupDataSource::operator==(const GetAsyncCommandResponse_SetupDataSource& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& new_instance_id_ == other.new_instance_id_
|
|
&& config_ == other.config_;
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_SetupDataSource::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* new_instance_id */:
|
|
field.get(&new_instance_id_);
|
|
break;
|
|
case 2 /* config */:
|
|
(*config_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_SetupDataSource::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_SetupDataSource::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_SetupDataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: new_instance_id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, new_instance_id_);
|
|
}
|
|
|
|
// Field 2: config
|
|
if (_has_field_[2]) {
|
|
(*config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_SetupTracing::GetAsyncCommandResponse_SetupTracing() = default;
|
|
GetAsyncCommandResponse_SetupTracing::~GetAsyncCommandResponse_SetupTracing() = default;
|
|
GetAsyncCommandResponse_SetupTracing::GetAsyncCommandResponse_SetupTracing(const GetAsyncCommandResponse_SetupTracing&) = default;
|
|
GetAsyncCommandResponse_SetupTracing& GetAsyncCommandResponse_SetupTracing::operator=(const GetAsyncCommandResponse_SetupTracing&) = default;
|
|
GetAsyncCommandResponse_SetupTracing::GetAsyncCommandResponse_SetupTracing(GetAsyncCommandResponse_SetupTracing&&) noexcept = default;
|
|
GetAsyncCommandResponse_SetupTracing& GetAsyncCommandResponse_SetupTracing::operator=(GetAsyncCommandResponse_SetupTracing&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_SetupTracing::operator==(const GetAsyncCommandResponse_SetupTracing& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& shared_buffer_page_size_kb_ == other.shared_buffer_page_size_kb_;
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_SetupTracing::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* shared_buffer_page_size_kb */:
|
|
field.get(&shared_buffer_page_size_kb_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_SetupTracing::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_SetupTracing::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_SetupTracing::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: shared_buffer_page_size_kb
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, shared_buffer_page_size_kb_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
GetAsyncCommandRequest::GetAsyncCommandRequest() = default;
|
|
GetAsyncCommandRequest::~GetAsyncCommandRequest() = default;
|
|
GetAsyncCommandRequest::GetAsyncCommandRequest(const GetAsyncCommandRequest&) = default;
|
|
GetAsyncCommandRequest& GetAsyncCommandRequest::operator=(const GetAsyncCommandRequest&) = default;
|
|
GetAsyncCommandRequest::GetAsyncCommandRequest(GetAsyncCommandRequest&&) noexcept = default;
|
|
GetAsyncCommandRequest& GetAsyncCommandRequest::operator=(GetAsyncCommandRequest&&) = default;
|
|
|
|
bool GetAsyncCommandRequest::operator==(const GetAsyncCommandRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool GetAsyncCommandRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandRequest::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ActivateTriggersResponse::ActivateTriggersResponse() = default;
|
|
ActivateTriggersResponse::~ActivateTriggersResponse() = default;
|
|
ActivateTriggersResponse::ActivateTriggersResponse(const ActivateTriggersResponse&) = default;
|
|
ActivateTriggersResponse& ActivateTriggersResponse::operator=(const ActivateTriggersResponse&) = default;
|
|
ActivateTriggersResponse::ActivateTriggersResponse(ActivateTriggersResponse&&) noexcept = default;
|
|
ActivateTriggersResponse& ActivateTriggersResponse::operator=(ActivateTriggersResponse&&) = default;
|
|
|
|
bool ActivateTriggersResponse::operator==(const ActivateTriggersResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool ActivateTriggersResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ActivateTriggersResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ActivateTriggersResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ActivateTriggersResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
ActivateTriggersRequest::ActivateTriggersRequest() = default;
|
|
ActivateTriggersRequest::~ActivateTriggersRequest() = default;
|
|
ActivateTriggersRequest::ActivateTriggersRequest(const ActivateTriggersRequest&) = default;
|
|
ActivateTriggersRequest& ActivateTriggersRequest::operator=(const ActivateTriggersRequest&) = default;
|
|
ActivateTriggersRequest::ActivateTriggersRequest(ActivateTriggersRequest&&) noexcept = default;
|
|
ActivateTriggersRequest& ActivateTriggersRequest::operator=(ActivateTriggersRequest&&) = default;
|
|
|
|
bool ActivateTriggersRequest::operator==(const ActivateTriggersRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& trigger_names_ == other.trigger_names_;
|
|
}
|
|
|
|
bool ActivateTriggersRequest::ParseFromArray(const void* raw, size_t size) {
|
|
trigger_names_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trigger_names */:
|
|
trigger_names_.emplace_back();
|
|
field.get(&trigger_names_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ActivateTriggersRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ActivateTriggersRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ActivateTriggersRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trigger_names
|
|
for (auto& it : trigger_names_) {
|
|
msg->AppendString(1, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
NotifyDataSourceStoppedResponse::NotifyDataSourceStoppedResponse() = default;
|
|
NotifyDataSourceStoppedResponse::~NotifyDataSourceStoppedResponse() = default;
|
|
NotifyDataSourceStoppedResponse::NotifyDataSourceStoppedResponse(const NotifyDataSourceStoppedResponse&) = default;
|
|
NotifyDataSourceStoppedResponse& NotifyDataSourceStoppedResponse::operator=(const NotifyDataSourceStoppedResponse&) = default;
|
|
NotifyDataSourceStoppedResponse::NotifyDataSourceStoppedResponse(NotifyDataSourceStoppedResponse&&) noexcept = default;
|
|
NotifyDataSourceStoppedResponse& NotifyDataSourceStoppedResponse::operator=(NotifyDataSourceStoppedResponse&&) = default;
|
|
|
|
bool NotifyDataSourceStoppedResponse::operator==(const NotifyDataSourceStoppedResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool NotifyDataSourceStoppedResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string NotifyDataSourceStoppedResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> NotifyDataSourceStoppedResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void NotifyDataSourceStoppedResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
NotifyDataSourceStoppedRequest::NotifyDataSourceStoppedRequest() = default;
|
|
NotifyDataSourceStoppedRequest::~NotifyDataSourceStoppedRequest() = default;
|
|
NotifyDataSourceStoppedRequest::NotifyDataSourceStoppedRequest(const NotifyDataSourceStoppedRequest&) = default;
|
|
NotifyDataSourceStoppedRequest& NotifyDataSourceStoppedRequest::operator=(const NotifyDataSourceStoppedRequest&) = default;
|
|
NotifyDataSourceStoppedRequest::NotifyDataSourceStoppedRequest(NotifyDataSourceStoppedRequest&&) noexcept = default;
|
|
NotifyDataSourceStoppedRequest& NotifyDataSourceStoppedRequest::operator=(NotifyDataSourceStoppedRequest&&) = default;
|
|
|
|
bool NotifyDataSourceStoppedRequest::operator==(const NotifyDataSourceStoppedRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& data_source_id_ == other.data_source_id_;
|
|
}
|
|
|
|
bool NotifyDataSourceStoppedRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_id */:
|
|
field.get(&data_source_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string NotifyDataSourceStoppedRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> NotifyDataSourceStoppedRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void NotifyDataSourceStoppedRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, data_source_id_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
NotifyDataSourceStartedResponse::NotifyDataSourceStartedResponse() = default;
|
|
NotifyDataSourceStartedResponse::~NotifyDataSourceStartedResponse() = default;
|
|
NotifyDataSourceStartedResponse::NotifyDataSourceStartedResponse(const NotifyDataSourceStartedResponse&) = default;
|
|
NotifyDataSourceStartedResponse& NotifyDataSourceStartedResponse::operator=(const NotifyDataSourceStartedResponse&) = default;
|
|
NotifyDataSourceStartedResponse::NotifyDataSourceStartedResponse(NotifyDataSourceStartedResponse&&) noexcept = default;
|
|
NotifyDataSourceStartedResponse& NotifyDataSourceStartedResponse::operator=(NotifyDataSourceStartedResponse&&) = default;
|
|
|
|
bool NotifyDataSourceStartedResponse::operator==(const NotifyDataSourceStartedResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool NotifyDataSourceStartedResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string NotifyDataSourceStartedResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> NotifyDataSourceStartedResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void NotifyDataSourceStartedResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
NotifyDataSourceStartedRequest::NotifyDataSourceStartedRequest() = default;
|
|
NotifyDataSourceStartedRequest::~NotifyDataSourceStartedRequest() = default;
|
|
NotifyDataSourceStartedRequest::NotifyDataSourceStartedRequest(const NotifyDataSourceStartedRequest&) = default;
|
|
NotifyDataSourceStartedRequest& NotifyDataSourceStartedRequest::operator=(const NotifyDataSourceStartedRequest&) = default;
|
|
NotifyDataSourceStartedRequest::NotifyDataSourceStartedRequest(NotifyDataSourceStartedRequest&&) noexcept = default;
|
|
NotifyDataSourceStartedRequest& NotifyDataSourceStartedRequest::operator=(NotifyDataSourceStartedRequest&&) = default;
|
|
|
|
bool NotifyDataSourceStartedRequest::operator==(const NotifyDataSourceStartedRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& data_source_id_ == other.data_source_id_;
|
|
}
|
|
|
|
bool NotifyDataSourceStartedRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_id */:
|
|
field.get(&data_source_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string NotifyDataSourceStartedRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> NotifyDataSourceStartedRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void NotifyDataSourceStartedRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, data_source_id_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
CommitDataResponse::CommitDataResponse() = default;
|
|
CommitDataResponse::~CommitDataResponse() = default;
|
|
CommitDataResponse::CommitDataResponse(const CommitDataResponse&) = default;
|
|
CommitDataResponse& CommitDataResponse::operator=(const CommitDataResponse&) = default;
|
|
CommitDataResponse::CommitDataResponse(CommitDataResponse&&) noexcept = default;
|
|
CommitDataResponse& CommitDataResponse::operator=(CommitDataResponse&&) = default;
|
|
|
|
bool CommitDataResponse::operator==(const CommitDataResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool CommitDataResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string CommitDataResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> CommitDataResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void CommitDataResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
UnregisterTraceWriterResponse::UnregisterTraceWriterResponse() = default;
|
|
UnregisterTraceWriterResponse::~UnregisterTraceWriterResponse() = default;
|
|
UnregisterTraceWriterResponse::UnregisterTraceWriterResponse(const UnregisterTraceWriterResponse&) = default;
|
|
UnregisterTraceWriterResponse& UnregisterTraceWriterResponse::operator=(const UnregisterTraceWriterResponse&) = default;
|
|
UnregisterTraceWriterResponse::UnregisterTraceWriterResponse(UnregisterTraceWriterResponse&&) noexcept = default;
|
|
UnregisterTraceWriterResponse& UnregisterTraceWriterResponse::operator=(UnregisterTraceWriterResponse&&) = default;
|
|
|
|
bool UnregisterTraceWriterResponse::operator==(const UnregisterTraceWriterResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool UnregisterTraceWriterResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string UnregisterTraceWriterResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> UnregisterTraceWriterResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void UnregisterTraceWriterResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
UnregisterTraceWriterRequest::UnregisterTraceWriterRequest() = default;
|
|
UnregisterTraceWriterRequest::~UnregisterTraceWriterRequest() = default;
|
|
UnregisterTraceWriterRequest::UnregisterTraceWriterRequest(const UnregisterTraceWriterRequest&) = default;
|
|
UnregisterTraceWriterRequest& UnregisterTraceWriterRequest::operator=(const UnregisterTraceWriterRequest&) = default;
|
|
UnregisterTraceWriterRequest::UnregisterTraceWriterRequest(UnregisterTraceWriterRequest&&) noexcept = default;
|
|
UnregisterTraceWriterRequest& UnregisterTraceWriterRequest::operator=(UnregisterTraceWriterRequest&&) = default;
|
|
|
|
bool UnregisterTraceWriterRequest::operator==(const UnregisterTraceWriterRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& trace_writer_id_ == other.trace_writer_id_;
|
|
}
|
|
|
|
bool UnregisterTraceWriterRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_writer_id */:
|
|
field.get(&trace_writer_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string UnregisterTraceWriterRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> UnregisterTraceWriterRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void UnregisterTraceWriterRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_writer_id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, trace_writer_id_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
RegisterTraceWriterResponse::RegisterTraceWriterResponse() = default;
|
|
RegisterTraceWriterResponse::~RegisterTraceWriterResponse() = default;
|
|
RegisterTraceWriterResponse::RegisterTraceWriterResponse(const RegisterTraceWriterResponse&) = default;
|
|
RegisterTraceWriterResponse& RegisterTraceWriterResponse::operator=(const RegisterTraceWriterResponse&) = default;
|
|
RegisterTraceWriterResponse::RegisterTraceWriterResponse(RegisterTraceWriterResponse&&) noexcept = default;
|
|
RegisterTraceWriterResponse& RegisterTraceWriterResponse::operator=(RegisterTraceWriterResponse&&) = default;
|
|
|
|
bool RegisterTraceWriterResponse::operator==(const RegisterTraceWriterResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool RegisterTraceWriterResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string RegisterTraceWriterResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> RegisterTraceWriterResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void RegisterTraceWriterResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
RegisterTraceWriterRequest::RegisterTraceWriterRequest() = default;
|
|
RegisterTraceWriterRequest::~RegisterTraceWriterRequest() = default;
|
|
RegisterTraceWriterRequest::RegisterTraceWriterRequest(const RegisterTraceWriterRequest&) = default;
|
|
RegisterTraceWriterRequest& RegisterTraceWriterRequest::operator=(const RegisterTraceWriterRequest&) = default;
|
|
RegisterTraceWriterRequest::RegisterTraceWriterRequest(RegisterTraceWriterRequest&&) noexcept = default;
|
|
RegisterTraceWriterRequest& RegisterTraceWriterRequest::operator=(RegisterTraceWriterRequest&&) = default;
|
|
|
|
bool RegisterTraceWriterRequest::operator==(const RegisterTraceWriterRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& trace_writer_id_ == other.trace_writer_id_
|
|
&& target_buffer_ == other.target_buffer_;
|
|
}
|
|
|
|
bool RegisterTraceWriterRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_writer_id */:
|
|
field.get(&trace_writer_id_);
|
|
break;
|
|
case 2 /* target_buffer */:
|
|
field.get(&target_buffer_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string RegisterTraceWriterRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> RegisterTraceWriterRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void RegisterTraceWriterRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_writer_id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, trace_writer_id_);
|
|
}
|
|
|
|
// Field 2: target_buffer
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, target_buffer_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
UnregisterDataSourceResponse::UnregisterDataSourceResponse() = default;
|
|
UnregisterDataSourceResponse::~UnregisterDataSourceResponse() = default;
|
|
UnregisterDataSourceResponse::UnregisterDataSourceResponse(const UnregisterDataSourceResponse&) = default;
|
|
UnregisterDataSourceResponse& UnregisterDataSourceResponse::operator=(const UnregisterDataSourceResponse&) = default;
|
|
UnregisterDataSourceResponse::UnregisterDataSourceResponse(UnregisterDataSourceResponse&&) noexcept = default;
|
|
UnregisterDataSourceResponse& UnregisterDataSourceResponse::operator=(UnregisterDataSourceResponse&&) = default;
|
|
|
|
bool UnregisterDataSourceResponse::operator==(const UnregisterDataSourceResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_;
|
|
}
|
|
|
|
bool UnregisterDataSourceResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string UnregisterDataSourceResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> UnregisterDataSourceResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void UnregisterDataSourceResponse::Serialize(::protozero::Message* msg) const {
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
UnregisterDataSourceRequest::UnregisterDataSourceRequest() = default;
|
|
UnregisterDataSourceRequest::~UnregisterDataSourceRequest() = default;
|
|
UnregisterDataSourceRequest::UnregisterDataSourceRequest(const UnregisterDataSourceRequest&) = default;
|
|
UnregisterDataSourceRequest& UnregisterDataSourceRequest::operator=(const UnregisterDataSourceRequest&) = default;
|
|
UnregisterDataSourceRequest::UnregisterDataSourceRequest(UnregisterDataSourceRequest&&) noexcept = default;
|
|
UnregisterDataSourceRequest& UnregisterDataSourceRequest::operator=(UnregisterDataSourceRequest&&) = default;
|
|
|
|
bool UnregisterDataSourceRequest::operator==(const UnregisterDataSourceRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& data_source_name_ == other.data_source_name_;
|
|
}
|
|
|
|
bool UnregisterDataSourceRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_name */:
|
|
field.get(&data_source_name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string UnregisterDataSourceRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> UnregisterDataSourceRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void UnregisterDataSourceRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, data_source_name_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
RegisterDataSourceResponse::RegisterDataSourceResponse() = default;
|
|
RegisterDataSourceResponse::~RegisterDataSourceResponse() = default;
|
|
RegisterDataSourceResponse::RegisterDataSourceResponse(const RegisterDataSourceResponse&) = default;
|
|
RegisterDataSourceResponse& RegisterDataSourceResponse::operator=(const RegisterDataSourceResponse&) = default;
|
|
RegisterDataSourceResponse::RegisterDataSourceResponse(RegisterDataSourceResponse&&) noexcept = default;
|
|
RegisterDataSourceResponse& RegisterDataSourceResponse::operator=(RegisterDataSourceResponse&&) = default;
|
|
|
|
bool RegisterDataSourceResponse::operator==(const RegisterDataSourceResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& error_ == other.error_;
|
|
}
|
|
|
|
bool RegisterDataSourceResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* error */:
|
|
field.get(&error_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string RegisterDataSourceResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> RegisterDataSourceResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void RegisterDataSourceResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: error
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, error_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
RegisterDataSourceRequest::RegisterDataSourceRequest() = default;
|
|
RegisterDataSourceRequest::~RegisterDataSourceRequest() = default;
|
|
RegisterDataSourceRequest::RegisterDataSourceRequest(const RegisterDataSourceRequest&) = default;
|
|
RegisterDataSourceRequest& RegisterDataSourceRequest::operator=(const RegisterDataSourceRequest&) = default;
|
|
RegisterDataSourceRequest::RegisterDataSourceRequest(RegisterDataSourceRequest&&) noexcept = default;
|
|
RegisterDataSourceRequest& RegisterDataSourceRequest::operator=(RegisterDataSourceRequest&&) = default;
|
|
|
|
bool RegisterDataSourceRequest::operator==(const RegisterDataSourceRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& data_source_descriptor_ == other.data_source_descriptor_;
|
|
}
|
|
|
|
bool RegisterDataSourceRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_descriptor */:
|
|
(*data_source_descriptor_).ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string RegisterDataSourceRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> RegisterDataSourceRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void RegisterDataSourceRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_descriptor
|
|
if (_has_field_[1]) {
|
|
(*data_source_descriptor_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
InitializeConnectionResponse::InitializeConnectionResponse() = default;
|
|
InitializeConnectionResponse::~InitializeConnectionResponse() = default;
|
|
InitializeConnectionResponse::InitializeConnectionResponse(const InitializeConnectionResponse&) = default;
|
|
InitializeConnectionResponse& InitializeConnectionResponse::operator=(const InitializeConnectionResponse&) = default;
|
|
InitializeConnectionResponse::InitializeConnectionResponse(InitializeConnectionResponse&&) noexcept = default;
|
|
InitializeConnectionResponse& InitializeConnectionResponse::operator=(InitializeConnectionResponse&&) = default;
|
|
|
|
bool InitializeConnectionResponse::operator==(const InitializeConnectionResponse& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& using_shmem_provided_by_producer_ == other.using_shmem_provided_by_producer_;
|
|
}
|
|
|
|
bool InitializeConnectionResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* using_shmem_provided_by_producer */:
|
|
field.get(&using_shmem_provided_by_producer_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string InitializeConnectionResponse::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> InitializeConnectionResponse::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void InitializeConnectionResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: using_shmem_provided_by_producer
|
|
if (_has_field_[1]) {
|
|
msg->AppendTinyVarInt(1, using_shmem_provided_by_producer_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
InitializeConnectionRequest::InitializeConnectionRequest() = default;
|
|
InitializeConnectionRequest::~InitializeConnectionRequest() = default;
|
|
InitializeConnectionRequest::InitializeConnectionRequest(const InitializeConnectionRequest&) = default;
|
|
InitializeConnectionRequest& InitializeConnectionRequest::operator=(const InitializeConnectionRequest&) = default;
|
|
InitializeConnectionRequest::InitializeConnectionRequest(InitializeConnectionRequest&&) noexcept = default;
|
|
InitializeConnectionRequest& InitializeConnectionRequest::operator=(InitializeConnectionRequest&&) = default;
|
|
|
|
bool InitializeConnectionRequest::operator==(const InitializeConnectionRequest& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& shared_memory_page_size_hint_bytes_ == other.shared_memory_page_size_hint_bytes_
|
|
&& shared_memory_size_hint_bytes_ == other.shared_memory_size_hint_bytes_
|
|
&& producer_name_ == other.producer_name_
|
|
&& smb_scraping_mode_ == other.smb_scraping_mode_
|
|
&& build_flags_ == other.build_flags_
|
|
&& producer_provided_shmem_ == other.producer_provided_shmem_;
|
|
}
|
|
|
|
bool InitializeConnectionRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* shared_memory_page_size_hint_bytes */:
|
|
field.get(&shared_memory_page_size_hint_bytes_);
|
|
break;
|
|
case 2 /* shared_memory_size_hint_bytes */:
|
|
field.get(&shared_memory_size_hint_bytes_);
|
|
break;
|
|
case 3 /* producer_name */:
|
|
field.get(&producer_name_);
|
|
break;
|
|
case 4 /* smb_scraping_mode */:
|
|
field.get(&smb_scraping_mode_);
|
|
break;
|
|
case 5 /* build_flags */:
|
|
field.get(&build_flags_);
|
|
break;
|
|
case 6 /* producer_provided_shmem */:
|
|
field.get(&producer_provided_shmem_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string InitializeConnectionRequest::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> InitializeConnectionRequest::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void InitializeConnectionRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: shared_memory_page_size_hint_bytes
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, shared_memory_page_size_hint_bytes_);
|
|
}
|
|
|
|
// Field 2: shared_memory_size_hint_bytes
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, shared_memory_size_hint_bytes_);
|
|
}
|
|
|
|
// Field 3: producer_name
|
|
if (_has_field_[3]) {
|
|
msg->AppendString(3, producer_name_);
|
|
}
|
|
|
|
// Field 4: smb_scraping_mode
|
|
if (_has_field_[4]) {
|
|
msg->AppendVarInt(4, smb_scraping_mode_);
|
|
}
|
|
|
|
// Field 5: build_flags
|
|
if (_has_field_[5]) {
|
|
msg->AppendVarInt(5, build_flags_);
|
|
}
|
|
|
|
// Field 6: producer_provided_shmem
|
|
if (_has_field_[6]) {
|
|
msg->AppendTinyVarInt(6, producer_provided_shmem_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: gen/protos/perfetto/ipc/wire_protocol.gen.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/ipc/wire_protocol.gen.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_WIRE_PROTOCOL_PROTO_CPP_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_WIRE_PROTOCOL_PROTO_CPP_H_
|
|
|
|
#include <stdint.h>
|
|
#include <bitset>
|
|
#include <vector>
|
|
#include <string>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
class IPCFrame;
|
|
class IPCFrame_RequestError;
|
|
class IPCFrame_InvokeMethodReply;
|
|
class IPCFrame_InvokeMethod;
|
|
class IPCFrame_BindServiceReply;
|
|
class IPCFrame_BindServiceReply_MethodInfo;
|
|
class IPCFrame_BindService;
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
namespace protozero {
|
|
class Message;
|
|
} // namespace protozero
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class PERFETTO_EXPORT IPCFrame : public ::protozero::CppMessageObj {
|
|
public:
|
|
using BindService = IPCFrame_BindService;
|
|
using BindServiceReply = IPCFrame_BindServiceReply;
|
|
using InvokeMethod = IPCFrame_InvokeMethod;
|
|
using InvokeMethodReply = IPCFrame_InvokeMethodReply;
|
|
using RequestError = IPCFrame_RequestError;
|
|
enum FieldNumbers {
|
|
kRequestIdFieldNumber = 2,
|
|
kMsgBindServiceFieldNumber = 3,
|
|
kMsgBindServiceReplyFieldNumber = 4,
|
|
kMsgInvokeMethodFieldNumber = 5,
|
|
kMsgInvokeMethodReplyFieldNumber = 6,
|
|
kMsgRequestErrorFieldNumber = 7,
|
|
kDataForTestingFieldNumber = 1,
|
|
};
|
|
|
|
IPCFrame();
|
|
~IPCFrame() override;
|
|
IPCFrame(IPCFrame&&) noexcept;
|
|
IPCFrame& operator=(IPCFrame&&);
|
|
IPCFrame(const IPCFrame&);
|
|
IPCFrame& operator=(const IPCFrame&);
|
|
bool operator==(const IPCFrame&) const;
|
|
bool operator!=(const IPCFrame& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_request_id() const { return _has_field_[2]; }
|
|
uint64_t request_id() const { return request_id_; }
|
|
void set_request_id(uint64_t value) { request_id_ = value; _has_field_.set(2); }
|
|
|
|
bool has_msg_bind_service() const { return _has_field_[3]; }
|
|
const IPCFrame_BindService& msg_bind_service() const { return *msg_bind_service_; }
|
|
IPCFrame_BindService* mutable_msg_bind_service() { _has_field_.set(3); return msg_bind_service_.get(); }
|
|
|
|
bool has_msg_bind_service_reply() const { return _has_field_[4]; }
|
|
const IPCFrame_BindServiceReply& msg_bind_service_reply() const { return *msg_bind_service_reply_; }
|
|
IPCFrame_BindServiceReply* mutable_msg_bind_service_reply() { _has_field_.set(4); return msg_bind_service_reply_.get(); }
|
|
|
|
bool has_msg_invoke_method() const { return _has_field_[5]; }
|
|
const IPCFrame_InvokeMethod& msg_invoke_method() const { return *msg_invoke_method_; }
|
|
IPCFrame_InvokeMethod* mutable_msg_invoke_method() { _has_field_.set(5); return msg_invoke_method_.get(); }
|
|
|
|
bool has_msg_invoke_method_reply() const { return _has_field_[6]; }
|
|
const IPCFrame_InvokeMethodReply& msg_invoke_method_reply() const { return *msg_invoke_method_reply_; }
|
|
IPCFrame_InvokeMethodReply* mutable_msg_invoke_method_reply() { _has_field_.set(6); return msg_invoke_method_reply_.get(); }
|
|
|
|
bool has_msg_request_error() const { return _has_field_[7]; }
|
|
const IPCFrame_RequestError& msg_request_error() const { return *msg_request_error_; }
|
|
IPCFrame_RequestError* mutable_msg_request_error() { _has_field_.set(7); return msg_request_error_.get(); }
|
|
|
|
int data_for_testing_size() const { return static_cast<int>(data_for_testing_.size()); }
|
|
const std::vector<std::string>& data_for_testing() const { return data_for_testing_; }
|
|
std::vector<std::string>* mutable_data_for_testing() { return &data_for_testing_; }
|
|
void clear_data_for_testing() { data_for_testing_.clear(); }
|
|
void add_data_for_testing(std::string value) { data_for_testing_.emplace_back(value); }
|
|
std::string* add_data_for_testing() { data_for_testing_.emplace_back(); return &data_for_testing_.back(); }
|
|
|
|
private:
|
|
uint64_t request_id_{};
|
|
::protozero::CopyablePtr<IPCFrame_BindService> msg_bind_service_;
|
|
::protozero::CopyablePtr<IPCFrame_BindServiceReply> msg_bind_service_reply_;
|
|
::protozero::CopyablePtr<IPCFrame_InvokeMethod> msg_invoke_method_;
|
|
::protozero::CopyablePtr<IPCFrame_InvokeMethodReply> msg_invoke_method_reply_;
|
|
::protozero::CopyablePtr<IPCFrame_RequestError> msg_request_error_;
|
|
std::vector<std::string> data_for_testing_;
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<8> _has_field_{};
|
|
};
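
// Illustrative sketch (not part of the generated header): building a minimal
// InvokeMethod request frame with the accessors declared above. The service
// and method ids below are placeholders; real ids are obtained from a
// BindServiceReply. |args| stands for the request ProtoMessage being invoked.
//
//   IPCFrame frame;
//   frame.set_request_id(42);
//   auto* invoke = frame.mutable_msg_invoke_method();
//   invoke->set_service_id(1);    // Placeholder id.
//   invoke->set_method_id(2);     // Placeholder id.
//   invoke->set_args_proto(args.SerializeAsString());
//   std::string wire_bytes = frame.SerializeAsString();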
|
|
|
|
|
|
class PERFETTO_EXPORT IPCFrame_RequestError : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kErrorFieldNumber = 1,
|
|
};
|
|
|
|
IPCFrame_RequestError();
|
|
~IPCFrame_RequestError() override;
|
|
IPCFrame_RequestError(IPCFrame_RequestError&&) noexcept;
|
|
IPCFrame_RequestError& operator=(IPCFrame_RequestError&&);
|
|
IPCFrame_RequestError(const IPCFrame_RequestError&);
|
|
IPCFrame_RequestError& operator=(const IPCFrame_RequestError&);
|
|
bool operator==(const IPCFrame_RequestError&) const;
|
|
bool operator!=(const IPCFrame_RequestError& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_error() const { return _has_field_[1]; }
|
|
const std::string& error() const { return error_; }
|
|
void set_error(const std::string& value) { error_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
std::string error_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT IPCFrame_InvokeMethodReply : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kSuccessFieldNumber = 1,
|
|
kHasMoreFieldNumber = 2,
|
|
kReplyProtoFieldNumber = 3,
|
|
};
|
|
|
|
IPCFrame_InvokeMethodReply();
|
|
~IPCFrame_InvokeMethodReply() override;
|
|
IPCFrame_InvokeMethodReply(IPCFrame_InvokeMethodReply&&) noexcept;
|
|
IPCFrame_InvokeMethodReply& operator=(IPCFrame_InvokeMethodReply&&);
|
|
IPCFrame_InvokeMethodReply(const IPCFrame_InvokeMethodReply&);
|
|
IPCFrame_InvokeMethodReply& operator=(const IPCFrame_InvokeMethodReply&);
|
|
bool operator==(const IPCFrame_InvokeMethodReply&) const;
|
|
bool operator!=(const IPCFrame_InvokeMethodReply& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_success() const { return _has_field_[1]; }
|
|
bool success() const { return success_; }
|
|
void set_success(bool value) { success_ = value; _has_field_.set(1); }
|
|
|
|
bool has_has_more() const { return _has_field_[2]; }
|
|
bool has_more() const { return has_more_; }
|
|
void set_has_more(bool value) { has_more_ = value; _has_field_.set(2); }
|
|
|
|
bool has_reply_proto() const { return _has_field_[3]; }
|
|
const std::string& reply_proto() const { return reply_proto_; }
|
|
void set_reply_proto(const std::string& value) { reply_proto_ = value; _has_field_.set(3); }
|
|
void set_reply_proto(const void* p, size_t s) { reply_proto_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(3); }
|
|
|
|
private:
|
|
bool success_{};
|
|
bool has_more_{};
|
|
std::string reply_proto_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<4> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT IPCFrame_InvokeMethod : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kServiceIdFieldNumber = 1,
|
|
kMethodIdFieldNumber = 2,
|
|
kArgsProtoFieldNumber = 3,
|
|
kDropReplyFieldNumber = 4,
|
|
};
|
|
|
|
IPCFrame_InvokeMethod();
|
|
~IPCFrame_InvokeMethod() override;
|
|
IPCFrame_InvokeMethod(IPCFrame_InvokeMethod&&) noexcept;
|
|
IPCFrame_InvokeMethod& operator=(IPCFrame_InvokeMethod&&);
|
|
IPCFrame_InvokeMethod(const IPCFrame_InvokeMethod&);
|
|
IPCFrame_InvokeMethod& operator=(const IPCFrame_InvokeMethod&);
|
|
bool operator==(const IPCFrame_InvokeMethod&) const;
|
|
bool operator!=(const IPCFrame_InvokeMethod& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_service_id() const { return _has_field_[1]; }
|
|
uint32_t service_id() const { return service_id_; }
|
|
void set_service_id(uint32_t value) { service_id_ = value; _has_field_.set(1); }
|
|
|
|
bool has_method_id() const { return _has_field_[2]; }
|
|
uint32_t method_id() const { return method_id_; }
|
|
void set_method_id(uint32_t value) { method_id_ = value; _has_field_.set(2); }
|
|
|
|
bool has_args_proto() const { return _has_field_[3]; }
|
|
const std::string& args_proto() const { return args_proto_; }
|
|
void set_args_proto(const std::string& value) { args_proto_ = value; _has_field_.set(3); }
|
|
void set_args_proto(const void* p, size_t s) { args_proto_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(3); }
|
|
|
|
bool has_drop_reply() const { return _has_field_[4]; }
|
|
bool drop_reply() const { return drop_reply_; }
|
|
void set_drop_reply(bool value) { drop_reply_ = value; _has_field_.set(4); }
|
|
|
|
private:
|
|
uint32_t service_id_{};
|
|
uint32_t method_id_{};
|
|
std::string args_proto_{};
|
|
bool drop_reply_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<5> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT IPCFrame_BindServiceReply : public ::protozero::CppMessageObj {
|
|
public:
|
|
using MethodInfo = IPCFrame_BindServiceReply_MethodInfo;
|
|
enum FieldNumbers {
|
|
kSuccessFieldNumber = 1,
|
|
kServiceIdFieldNumber = 2,
|
|
kMethodsFieldNumber = 3,
|
|
};
|
|
|
|
IPCFrame_BindServiceReply();
|
|
~IPCFrame_BindServiceReply() override;
|
|
IPCFrame_BindServiceReply(IPCFrame_BindServiceReply&&) noexcept;
|
|
IPCFrame_BindServiceReply& operator=(IPCFrame_BindServiceReply&&);
|
|
IPCFrame_BindServiceReply(const IPCFrame_BindServiceReply&);
|
|
IPCFrame_BindServiceReply& operator=(const IPCFrame_BindServiceReply&);
|
|
bool operator==(const IPCFrame_BindServiceReply&) const;
|
|
bool operator!=(const IPCFrame_BindServiceReply& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_success() const { return _has_field_[1]; }
|
|
bool success() const { return success_; }
|
|
void set_success(bool value) { success_ = value; _has_field_.set(1); }
|
|
|
|
bool has_service_id() const { return _has_field_[2]; }
|
|
uint32_t service_id() const { return service_id_; }
|
|
void set_service_id(uint32_t value) { service_id_ = value; _has_field_.set(2); }
|
|
|
|
int methods_size() const { return static_cast<int>(methods_.size()); }
|
|
const std::vector<IPCFrame_BindServiceReply_MethodInfo>& methods() const { return methods_; }
|
|
std::vector<IPCFrame_BindServiceReply_MethodInfo>* mutable_methods() { return &methods_; }
|
|
void clear_methods() { methods_.clear(); }
|
|
IPCFrame_BindServiceReply_MethodInfo* add_methods() { methods_.emplace_back(); return &methods_.back(); }
|
|
|
|
private:
|
|
bool success_{};
|
|
uint32_t service_id_{};
|
|
std::vector<IPCFrame_BindServiceReply_MethodInfo> methods_;
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<4> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT IPCFrame_BindServiceReply_MethodInfo : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kIdFieldNumber = 1,
|
|
kNameFieldNumber = 2,
|
|
};
|
|
|
|
IPCFrame_BindServiceReply_MethodInfo();
|
|
~IPCFrame_BindServiceReply_MethodInfo() override;
|
|
IPCFrame_BindServiceReply_MethodInfo(IPCFrame_BindServiceReply_MethodInfo&&) noexcept;
|
|
IPCFrame_BindServiceReply_MethodInfo& operator=(IPCFrame_BindServiceReply_MethodInfo&&);
|
|
IPCFrame_BindServiceReply_MethodInfo(const IPCFrame_BindServiceReply_MethodInfo&);
|
|
IPCFrame_BindServiceReply_MethodInfo& operator=(const IPCFrame_BindServiceReply_MethodInfo&);
|
|
bool operator==(const IPCFrame_BindServiceReply_MethodInfo&) const;
|
|
bool operator!=(const IPCFrame_BindServiceReply_MethodInfo& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_id() const { return _has_field_[1]; }
|
|
uint32_t id() const { return id_; }
|
|
void set_id(uint32_t value) { id_ = value; _has_field_.set(1); }
|
|
|
|
bool has_name() const { return _has_field_[2]; }
|
|
const std::string& name() const { return name_; }
|
|
void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
|
|
|
|
private:
|
|
uint32_t id_{};
|
|
std::string name_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<3> _has_field_{};
|
|
};
|
|
|
|
|
|
class PERFETTO_EXPORT IPCFrame_BindService : public ::protozero::CppMessageObj {
|
|
public:
|
|
enum FieldNumbers {
|
|
kServiceNameFieldNumber = 1,
|
|
};
|
|
|
|
IPCFrame_BindService();
|
|
~IPCFrame_BindService() override;
|
|
IPCFrame_BindService(IPCFrame_BindService&&) noexcept;
|
|
IPCFrame_BindService& operator=(IPCFrame_BindService&&);
|
|
IPCFrame_BindService(const IPCFrame_BindService&);
|
|
IPCFrame_BindService& operator=(const IPCFrame_BindService&);
|
|
bool operator==(const IPCFrame_BindService&) const;
|
|
bool operator!=(const IPCFrame_BindService& other) const { return !(*this == other); }
|
|
|
|
bool ParseFromArray(const void*, size_t) override;
|
|
std::string SerializeAsString() const override;
|
|
std::vector<uint8_t> SerializeAsArray() const override;
|
|
void Serialize(::protozero::Message*) const;
|
|
|
|
bool has_service_name() const { return _has_field_[1]; }
|
|
const std::string& service_name() const { return service_name_; }
|
|
void set_service_name(const std::string& value) { service_name_ = value; _has_field_.set(1); }
|
|
|
|
private:
|
|
std::string service_name_{};
|
|
|
|
// Allows unknown protobuf fields to be preserved for compatibility
|
|
// with future versions of .proto files.
|
|
std::string unknown_fields_;
|
|
|
|
std::bitset<2> _has_field_{};
|
|
};
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_WIRE_PROTOCOL_PROTO_CPP_H_
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
IPCFrame::IPCFrame() = default;
|
|
IPCFrame::~IPCFrame() = default;
|
|
IPCFrame::IPCFrame(const IPCFrame&) = default;
|
|
IPCFrame& IPCFrame::operator=(const IPCFrame&) = default;
|
|
IPCFrame::IPCFrame(IPCFrame&&) noexcept = default;
|
|
IPCFrame& IPCFrame::operator=(IPCFrame&&) = default;
|
|
|
|
bool IPCFrame::operator==(const IPCFrame& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& request_id_ == other.request_id_
|
|
&& msg_bind_service_ == other.msg_bind_service_
|
|
&& msg_bind_service_reply_ == other.msg_bind_service_reply_
|
|
&& msg_invoke_method_ == other.msg_invoke_method_
|
|
&& msg_invoke_method_reply_ == other.msg_invoke_method_reply_
|
|
&& msg_request_error_ == other.msg_request_error_
|
|
&& data_for_testing_ == other.data_for_testing_;
|
|
}
|
|
|
|
bool IPCFrame::ParseFromArray(const void* raw, size_t size) {
|
|
data_for_testing_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 2 /* request_id */:
|
|
field.get(&request_id_);
|
|
break;
|
|
case 3 /* msg_bind_service */:
|
|
(*msg_bind_service_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 4 /* msg_bind_service_reply */:
|
|
(*msg_bind_service_reply_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 5 /* msg_invoke_method */:
|
|
(*msg_invoke_method_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 6 /* msg_invoke_method_reply */:
|
|
(*msg_invoke_method_reply_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 7 /* msg_request_error */:
|
|
(*msg_request_error_).ParseFromString(field.as_std_string());
|
|
break;
|
|
case 1 /* data_for_testing */:
|
|
data_for_testing_.emplace_back();
|
|
field.get(&data_for_testing_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string IPCFrame::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> IPCFrame::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void IPCFrame::Serialize(::protozero::Message* msg) const {
|
|
// Field 2: request_id
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, request_id_);
|
|
}
|
|
|
|
// Field 3: msg_bind_service
|
|
if (_has_field_[3]) {
|
|
(*msg_bind_service_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
|
|
}
|
|
|
|
// Field 4: msg_bind_service_reply
|
|
if (_has_field_[4]) {
|
|
(*msg_bind_service_reply_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
|
|
}
|
|
|
|
// Field 5: msg_invoke_method
|
|
if (_has_field_[5]) {
|
|
(*msg_invoke_method_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
|
|
}
|
|
|
|
// Field 6: msg_invoke_method_reply
|
|
if (_has_field_[6]) {
|
|
(*msg_invoke_method_reply_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
// Field 7: msg_request_error
|
|
if (_has_field_[7]) {
|
|
(*msg_request_error_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
|
|
}
|
|
|
|
// Field 1: data_for_testing
|
|
for (auto& it : data_for_testing_) {
|
|
msg->AppendString(1, it);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
IPCFrame_RequestError::IPCFrame_RequestError() = default;
|
|
IPCFrame_RequestError::~IPCFrame_RequestError() = default;
|
|
IPCFrame_RequestError::IPCFrame_RequestError(const IPCFrame_RequestError&) = default;
|
|
IPCFrame_RequestError& IPCFrame_RequestError::operator=(const IPCFrame_RequestError&) = default;
|
|
IPCFrame_RequestError::IPCFrame_RequestError(IPCFrame_RequestError&&) noexcept = default;
|
|
IPCFrame_RequestError& IPCFrame_RequestError::operator=(IPCFrame_RequestError&&) = default;
|
|
|
|
bool IPCFrame_RequestError::operator==(const IPCFrame_RequestError& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& error_ == other.error_;
|
|
}
|
|
|
|
bool IPCFrame_RequestError::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* error */:
|
|
field.get(&error_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string IPCFrame_RequestError::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> IPCFrame_RequestError::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void IPCFrame_RequestError::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: error
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, error_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
IPCFrame_InvokeMethodReply::IPCFrame_InvokeMethodReply() = default;
|
|
IPCFrame_InvokeMethodReply::~IPCFrame_InvokeMethodReply() = default;
|
|
IPCFrame_InvokeMethodReply::IPCFrame_InvokeMethodReply(const IPCFrame_InvokeMethodReply&) = default;
|
|
IPCFrame_InvokeMethodReply& IPCFrame_InvokeMethodReply::operator=(const IPCFrame_InvokeMethodReply&) = default;
|
|
IPCFrame_InvokeMethodReply::IPCFrame_InvokeMethodReply(IPCFrame_InvokeMethodReply&&) noexcept = default;
|
|
IPCFrame_InvokeMethodReply& IPCFrame_InvokeMethodReply::operator=(IPCFrame_InvokeMethodReply&&) = default;
|
|
|
|
bool IPCFrame_InvokeMethodReply::operator==(const IPCFrame_InvokeMethodReply& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& success_ == other.success_
|
|
&& has_more_ == other.has_more_
|
|
&& reply_proto_ == other.reply_proto_;
|
|
}
|
|
|
|
bool IPCFrame_InvokeMethodReply::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* success */:
|
|
field.get(&success_);
|
|
break;
|
|
case 2 /* has_more */:
|
|
field.get(&has_more_);
|
|
break;
|
|
case 3 /* reply_proto */:
|
|
field.get(&reply_proto_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string IPCFrame_InvokeMethodReply::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> IPCFrame_InvokeMethodReply::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void IPCFrame_InvokeMethodReply::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: success
|
|
if (_has_field_[1]) {
|
|
msg->AppendTinyVarInt(1, success_);
|
|
}
|
|
|
|
// Field 2: has_more
|
|
if (_has_field_[2]) {
|
|
msg->AppendTinyVarInt(2, has_more_);
|
|
}
|
|
|
|
// Field 3: reply_proto
|
|
if (_has_field_[3]) {
|
|
msg->AppendString(3, reply_proto_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
IPCFrame_InvokeMethod::IPCFrame_InvokeMethod() = default;
|
|
IPCFrame_InvokeMethod::~IPCFrame_InvokeMethod() = default;
|
|
IPCFrame_InvokeMethod::IPCFrame_InvokeMethod(const IPCFrame_InvokeMethod&) = default;
|
|
IPCFrame_InvokeMethod& IPCFrame_InvokeMethod::operator=(const IPCFrame_InvokeMethod&) = default;
|
|
IPCFrame_InvokeMethod::IPCFrame_InvokeMethod(IPCFrame_InvokeMethod&&) noexcept = default;
|
|
IPCFrame_InvokeMethod& IPCFrame_InvokeMethod::operator=(IPCFrame_InvokeMethod&&) = default;
|
|
|
|
bool IPCFrame_InvokeMethod::operator==(const IPCFrame_InvokeMethod& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& service_id_ == other.service_id_
|
|
&& method_id_ == other.method_id_
|
|
&& args_proto_ == other.args_proto_
|
|
&& drop_reply_ == other.drop_reply_;
|
|
}
|
|
|
|
bool IPCFrame_InvokeMethod::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* service_id */:
|
|
field.get(&service_id_);
|
|
break;
|
|
case 2 /* method_id */:
|
|
field.get(&method_id_);
|
|
break;
|
|
case 3 /* args_proto */:
|
|
field.get(&args_proto_);
|
|
break;
|
|
case 4 /* drop_reply */:
|
|
field.get(&drop_reply_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string IPCFrame_InvokeMethod::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> IPCFrame_InvokeMethod::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void IPCFrame_InvokeMethod::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: service_id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, service_id_);
|
|
}
|
|
|
|
// Field 2: method_id
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, method_id_);
|
|
}
|
|
|
|
// Field 3: args_proto
|
|
if (_has_field_[3]) {
|
|
msg->AppendString(3, args_proto_);
|
|
}
|
|
|
|
// Field 4: drop_reply
|
|
if (_has_field_[4]) {
|
|
msg->AppendTinyVarInt(4, drop_reply_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
IPCFrame_BindServiceReply::IPCFrame_BindServiceReply() = default;
|
|
IPCFrame_BindServiceReply::~IPCFrame_BindServiceReply() = default;
|
|
IPCFrame_BindServiceReply::IPCFrame_BindServiceReply(const IPCFrame_BindServiceReply&) = default;
|
|
IPCFrame_BindServiceReply& IPCFrame_BindServiceReply::operator=(const IPCFrame_BindServiceReply&) = default;
|
|
IPCFrame_BindServiceReply::IPCFrame_BindServiceReply(IPCFrame_BindServiceReply&&) noexcept = default;
|
|
IPCFrame_BindServiceReply& IPCFrame_BindServiceReply::operator=(IPCFrame_BindServiceReply&&) = default;
|
|
|
|
bool IPCFrame_BindServiceReply::operator==(const IPCFrame_BindServiceReply& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& success_ == other.success_
|
|
&& service_id_ == other.service_id_
|
|
&& methods_ == other.methods_;
|
|
}
|
|
|
|
bool IPCFrame_BindServiceReply::ParseFromArray(const void* raw, size_t size) {
|
|
methods_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* success */:
|
|
field.get(&success_);
|
|
break;
|
|
case 2 /* service_id */:
|
|
field.get(&service_id_);
|
|
break;
|
|
case 3 /* methods */:
|
|
methods_.emplace_back();
|
|
methods_.back().ParseFromString(field.as_std_string());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string IPCFrame_BindServiceReply::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> IPCFrame_BindServiceReply::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void IPCFrame_BindServiceReply::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: success
|
|
if (_has_field_[1]) {
|
|
msg->AppendTinyVarInt(1, success_);
|
|
}
|
|
|
|
// Field 2: service_id
|
|
if (_has_field_[2]) {
|
|
msg->AppendVarInt(2, service_id_);
|
|
}
|
|
|
|
// Field 3: methods
|
|
for (auto& it : methods_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
IPCFrame_BindServiceReply_MethodInfo::IPCFrame_BindServiceReply_MethodInfo() = default;
|
|
IPCFrame_BindServiceReply_MethodInfo::~IPCFrame_BindServiceReply_MethodInfo() = default;
|
|
IPCFrame_BindServiceReply_MethodInfo::IPCFrame_BindServiceReply_MethodInfo(const IPCFrame_BindServiceReply_MethodInfo&) = default;
|
|
IPCFrame_BindServiceReply_MethodInfo& IPCFrame_BindServiceReply_MethodInfo::operator=(const IPCFrame_BindServiceReply_MethodInfo&) = default;
|
|
IPCFrame_BindServiceReply_MethodInfo::IPCFrame_BindServiceReply_MethodInfo(IPCFrame_BindServiceReply_MethodInfo&&) noexcept = default;
|
|
IPCFrame_BindServiceReply_MethodInfo& IPCFrame_BindServiceReply_MethodInfo::operator=(IPCFrame_BindServiceReply_MethodInfo&&) = default;
|
|
|
|
bool IPCFrame_BindServiceReply_MethodInfo::operator==(const IPCFrame_BindServiceReply_MethodInfo& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& id_ == other.id_
|
|
&& name_ == other.name_;
|
|
}
|
|
|
|
bool IPCFrame_BindServiceReply_MethodInfo::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* id */:
|
|
field.get(&id_);
|
|
break;
|
|
case 2 /* name */:
|
|
field.get(&name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string IPCFrame_BindServiceReply_MethodInfo::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> IPCFrame_BindServiceReply_MethodInfo::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void IPCFrame_BindServiceReply_MethodInfo::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: id
|
|
if (_has_field_[1]) {
|
|
msg->AppendVarInt(1, id_);
|
|
}
|
|
|
|
// Field 2: name
|
|
if (_has_field_[2]) {
|
|
msg->AppendString(2, name_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
|
|
IPCFrame_BindService::IPCFrame_BindService() = default;
|
|
IPCFrame_BindService::~IPCFrame_BindService() = default;
|
|
IPCFrame_BindService::IPCFrame_BindService(const IPCFrame_BindService&) = default;
|
|
IPCFrame_BindService& IPCFrame_BindService::operator=(const IPCFrame_BindService&) = default;
|
|
IPCFrame_BindService::IPCFrame_BindService(IPCFrame_BindService&&) noexcept = default;
|
|
IPCFrame_BindService& IPCFrame_BindService::operator=(IPCFrame_BindService&&) = default;
|
|
|
|
bool IPCFrame_BindService::operator==(const IPCFrame_BindService& other) const {
|
|
return unknown_fields_ == other.unknown_fields_
|
|
&& service_name_ == other.service_name_;
|
|
}
|
|
|
|
bool IPCFrame_BindService::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* service_name */:
|
|
field.get(&service_name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string IPCFrame_BindService::SerializeAsString() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> IPCFrame_BindService::SerializeAsArray() const {
|
|
::protozero::HeapBuffered<::protozero::Message> msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void IPCFrame_BindService::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: service_name
|
|
if (_has_field_[1]) {
|
|
msg->AppendString(1, service_name_);
|
|
}
|
|
|
|
msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
|
|
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#pragma GCC diagnostic pop
|
|
// gen_amalgamated begin source: src/ipc/buffered_frame_deserializer.cc
|
|
// gen_amalgamated begin header: src/ipc/buffered_frame_deserializer.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/basic_types.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_BASIC_TYPES_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_BASIC_TYPES_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
#include <sys/types.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
using ProtoMessage = ::protozero::CppMessageObj;
|
|
using ServiceID = uint32_t;
|
|
using MethodID = uint32_t;
|
|
using ClientID = uint64_t;
|
|
using RequestID = uint64_t;
|
|
|
|
// This determines the maximum size allowed for an IPC message. Trying to send
|
|
// or receive a larger message will hit DCHECK(s) and auto-disconnect.
|
|
constexpr size_t kIPCBufferSize = 128 * 1024;
|
|
|
|
constexpr uid_t kInvalidUid = static_cast<uid_t>(-1);
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_BASIC_TYPES_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_IPC_BUFFERED_FRAME_DESERIALIZER_H_
|
|
#define SRC_IPC_BUFFERED_FRAME_DESERIALIZER_H_
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <list>
|
|
#include <memory>
|
|
|
|
#include <sys/mman.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace protos {
|
|
namespace gen {
|
|
class IPCFrame;
|
|
} // namespace gen
|
|
} // namespace protos
|
|
|
|
namespace ipc {
|
|
|
|
using Frame = ::perfetto::protos::gen::IPCFrame;
|
|
|
|
// Deserializes incoming frames, taking care of buffering and tokenization.
|
|
// Used by both host and client to decode incoming frames.
|
|
//
|
|
// Which problem does it solve?
|
|
// ----------------------------
|
|
// The wire protocol is as follows:
|
|
// [32-bit frame size][proto-encoded Frame], e.g:
|
|
// [06 00 00 00][00 11 22 33 44 55 66]
|
|
// [02 00 00 00][AA BB]
|
|
// [04 00 00 00][CC DD EE FF]
|
|
// However, given that the socket works in SOCK_STREAM mode, the recv() calls
|
|
// might see the following:
|
|
// 06 00 00
|
|
// 00 00 11 22 33 44 55
|
|
// 66 02 00 00 00 ...
|
|
// This class takes care of efficiently buffering the received data, without
// making any assumptions about how the incoming data will be chunked by the
// socket. For instance, a single recv() might not produce any frame (because
// only part of a frame was received) or might produce more than one frame.
|
|
//
|
|
// Usage
|
|
// -----
|
|
// Both host and client use this as follows:
|
|
//
|
|
// auto buf = rpc_frame_decoder.BeginReceive();
|
|
// size_t rsize = socket.recv(buf.first, buf.second);
|
|
// rpc_frame_decoder.EndReceive(rsize);
|
|
// while (Frame frame = rpc_frame_decoder.PopNextFrame()) {
|
|
// ... process |frame|
|
|
// }
|
|
//
|
|
// Design goals:
|
|
// -------------
|
|
// - Optimize for the realistic case of each recv() receiving one or more
|
|
// whole frames. In this case no memmove is performed.
|
|
// - Guarantee that frames lie in a virtually contiguous memory area.
//   This makes it possible to use the protobuf-lite deserialization API
//   (scattered deserialization is supported only by libprotobuf-full).
// - Put a hard bound on the size of the incoming buffer. This prevents a
//   malicious peer from sending an abnormally large frame and OOMing us.
|
|
// - Simplicity: just use a linear mmap region. No reallocations or scattering.
|
|
// Takes care of madvise()-ing unused memory.
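//
// End-to-end example
// ------------------
// A minimal sketch (e.g. for a unit test, not part of the production flow):
// serialize one Frame and feed it back in two arbitrary chunks, mimicking how
// a SOCK_STREAM socket may split the data.
//
//   Frame frame;
//   frame.set_request_id(1);
//   std::string wire = BufferedFrameDeserializer::Serialize(frame);
//
//   BufferedFrameDeserializer deser;
//   size_t first_chunk = wire.size() / 2;  // 3 bytes here, < 4-byte header.
//   auto rbuf = deser.BeginReceive();
//   memcpy(rbuf.data, wire.data(), first_chunk);
//   PERFETTO_CHECK(deser.EndReceive(first_chunk));
//   PERFETTO_CHECK(!deser.PopNextFrame());  // Frame not complete yet.
//
//   rbuf = deser.BeginReceive();
//   memcpy(rbuf.data, wire.data() + first_chunk, wire.size() - first_chunk);
//   PERFETTO_CHECK(deser.EndReceive(wire.size() - first_chunk));
//   std::unique_ptr<Frame> decoded = deser.PopNextFrame();  // Whole frame.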
|
|
|
|
class BufferedFrameDeserializer {
|
|
public:
|
|
struct ReceiveBuffer {
|
|
char* data;
|
|
size_t size;
|
|
};
|
|
|
|
// |max_capacity| is overridable only for tests.
|
|
explicit BufferedFrameDeserializer(size_t max_capacity = kIPCBufferSize);
|
|
~BufferedFrameDeserializer();
|
|
|
|
// This function doesn't really belong here, as it does serialization, unlike
// the rest of this class. However, it is so small and shares so many
// dependencies with this class that it doesn't justify a class of its own.
|
|
static std::string Serialize(const Frame&);
|
|
|
|
// Returns a buffer that can be passed to recv(). The buffer is deliberately
|
|
// not initialized.
|
|
ReceiveBuffer BeginReceive();
|
|
|
|
// Must be called soon after BeginReceive().
|
|
// |recv_size| is the number of valid bytes that have been written into the
|
|
// buffer previously returned by BeginReceive() (the return value of recv()).
|
|
// Returns false if a header > |max_capacity| is received, in which case the
|
|
// caller is expected to shut down the socket and terminate the IPC.
|
|
bool EndReceive(size_t recv_size) PERFETTO_WARN_UNUSED_RESULT;
|
|
|
|
// Decodes and returns the next decoded frame in the buffer if any, nullptr
|
|
// if no further frames have been decoded.
|
|
std::unique_ptr<Frame> PopNextFrame();
|
|
|
|
size_t capacity() const { return capacity_; }
|
|
size_t size() const { return size_; }
|
|
|
|
private:
|
|
BufferedFrameDeserializer(const BufferedFrameDeserializer&) = delete;
|
|
BufferedFrameDeserializer& operator=(const BufferedFrameDeserializer&) =
|
|
delete;
|
|
|
|
// If a valid frame is decoded it is added to |decoded_frames_|.
|
|
void DecodeFrame(const char*, size_t);
|
|
|
|
char* buf() { return reinterpret_cast<char*>(buf_.Get()); }
|
|
|
|
base::PagedMemory buf_;
|
|
const size_t capacity_ = 0; // sizeof(|buf_|).
|
|
|
|
// The number of bytes in |buf_| that contain valid data (as a result of
|
|
// EndReceive()). This is always <= |capacity_|.
|
|
size_t size_ = 0;
|
|
|
|
std::list<std::unique_ptr<Frame>> decoded_frames_;
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_IPC_BUFFERED_FRAME_DESERIALIZER_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/ipc/buffered_frame_deserializer.h"
|
|
|
|
#include <inttypes.h>
|
|
|
|
#include <algorithm>
|
|
#include <type_traits>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
namespace {
|
|
|
|
// The header is just the number of bytes of the Frame protobuf message.
|
|
constexpr size_t kHeaderSize = sizeof(uint32_t);
|
|
} // namespace
|
|
|
|
BufferedFrameDeserializer::BufferedFrameDeserializer(size_t max_capacity)
|
|
: capacity_(max_capacity) {
|
|
PERFETTO_CHECK(max_capacity % base::GetSysPageSize() == 0);
|
|
PERFETTO_CHECK(max_capacity > base::GetSysPageSize());
|
|
}
|
|
|
|
BufferedFrameDeserializer::~BufferedFrameDeserializer() = default;
|
|
|
|
BufferedFrameDeserializer::ReceiveBuffer
|
|
BufferedFrameDeserializer::BeginReceive() {
|
|
// Upon the first recv() initialize the buffer to the max message size, but
|
|
// release the physical memory for all but the first page. The kernel will
|
|
// automatically give us physical pages back as soon as we page-fault on them.
|
|
if (!buf_.IsValid()) {
|
|
PERFETTO_DCHECK(size_ == 0);
|
|
// TODO(eseckler): Don't commit all of the buffer at once on Windows.
|
|
buf_ = base::PagedMemory::Allocate(capacity_);
|
|
|
|
// Surely we are going to use at least the first page, but we may not need
|
|
// the rest for a bit.
|
|
const auto page_size = base::GetSysPageSize();
|
|
buf_.AdviseDontNeed(buf() + page_size, capacity_ - page_size);
|
|
}
|
|
|
|
PERFETTO_CHECK(capacity_ > size_);
|
|
return ReceiveBuffer{buf() + size_, capacity_ - size_};
|
|
}
|
|
|
|
bool BufferedFrameDeserializer::EndReceive(size_t recv_size) {
|
|
const auto page_size = base::GetSysPageSize();
|
|
PERFETTO_CHECK(recv_size + size_ <= capacity_);
|
|
size_ += recv_size;
|
|
|
|
// At this point the contents of buf_ can contain:
|
|
// A) Only a fragment of the header (the size of the frame). E.g.,
|
|
// 03 00 00 (the header is 4 bytes, one is missing).
|
|
//
|
|
// B) A header and a part of the frame. E.g.,
|
|
// 05 00 00 00 11 22 33
|
|
// [ header, size=5 ] [ Partial frame ]
|
|
//
|
|
// C) One or more complete header+frame. E.g.,
|
|
// 05 00 00 00 11 22 33 44 55 03 00 00 00 AA BB CC
|
|
// [ header, size=5 ] [ Whole frame ] [ header, size=3 ] [ Whole frame ]
|
|
//
|
|
// D) Some complete header+frame(s) and a partial header or frame (C + A/B).
|
|
//
|
|
// C is the most likely case and the one we are optimizing for. A, B, D can
|
|
// happen because of the streaming nature of the socket.
|
|
// The invariant of this function is that, when it returns, buf_ is either
|
|
// empty (we drained all the complete frames) or starts with the header of the
|
|
// next, still incomplete, frame.
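//
// Worked example of case D (illustrative): suppose buf_ holds the 11 bytes
//   02 00 00 00 AA BB 05 00 00 00 CC
// The loop below consumes the first 6 bytes (4-byte header + the whole
// 2-byte frame [AA BB]) and then stops, because only 1 of the 5 payload
// bytes of the second frame has arrived. The trailing 5 bytes
// (05 00 00 00 CC) are memmove()'d to the start of buf_ and size_ becomes 5.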
|
|
|
|
size_t consumed_size = 0;
|
|
for (;;) {
|
|
if (size_ < consumed_size + kHeaderSize)
|
|
break; // Case A, not enough data to read even the header.
|
|
|
|
// Read the header into |payload_size|.
|
|
uint32_t payload_size = 0;
|
|
const char* rd_ptr = buf() + consumed_size;
|
|
memcpy(base::AssumeLittleEndian(&payload_size), rd_ptr, kHeaderSize);
|
|
|
|
// Saturate the |payload_size| to prevent overflows. The > capacity_ check
|
|
// below will abort the parsing.
|
|
size_t next_frame_size =
|
|
std::min(static_cast<size_t>(payload_size), capacity_);
|
|
next_frame_size += kHeaderSize;
|
|
rd_ptr += kHeaderSize;
|
|
|
|
if (size_ < consumed_size + next_frame_size) {
|
|
// Case B. We got the header but not the whole frame.
|
|
if (next_frame_size > capacity_) {
|
|
// The caller is expected to shut down the socket and give up at this
|
|
// point. If it doesn't do that and insists going on at some point it
|
|
// will hit the capacity check in BeginReceive().
|
|
PERFETTO_LOG("IPC Frame too large (size %zu)", next_frame_size);
|
|
return false;
|
|
}
|
|
break;
|
|
}
|
|
|
|
// Case C. We got at least one header and whole frame.
|
|
DecodeFrame(rd_ptr, payload_size);
|
|
consumed_size += next_frame_size;
|
|
}
|
|
|
|
PERFETTO_DCHECK(consumed_size <= size_);
|
|
if (consumed_size > 0) {
|
|
// Shift out the consumed data from the buffer. In the typical case (C)
|
|
// there is nothing to shift really, just setting size_ = 0 is enough.
|
|
// Shifting is only for the (unlikely) case D.
|
|
size_ -= consumed_size;
|
|
if (size_ > 0) {
|
|
// Case D. We consumed some frames but there is a leftover at the end of
|
|
// the buffer. Shift out the consumed bytes, so that on the next round
|
|
// |buf_| starts with the header of the next unconsumed frame.
|
|
const char* move_begin = buf() + consumed_size;
|
|
PERFETTO_CHECK(move_begin > buf());
|
|
PERFETTO_CHECK(move_begin + size_ <= buf() + capacity_);
|
|
memmove(buf(), move_begin, size_);
|
|
}
|
|
// If we just finished decoding a large frame that used more than one page,
|
|
// release the extra memory in the buffer. Large frames should be quite
|
|
// rare.
|
|
if (consumed_size > page_size) {
|
|
size_t size_rounded_up = (size_ / page_size + 1) * page_size;
|
|
if (size_rounded_up < capacity_) {
|
|
char* madvise_begin = buf() + size_rounded_up;
|
|
const size_t madvise_size = capacity_ - size_rounded_up;
|
|
PERFETTO_CHECK(madvise_begin > buf() + size_);
|
|
PERFETTO_CHECK(madvise_begin + madvise_size <= buf() + capacity_);
|
|
buf_.AdviseDontNeed(madvise_begin, madvise_size);
|
|
}
|
|
}
|
|
}
|
|
// At this point |size_| == 0 for case C, > 0 for cases A, B, D.
|
|
return true;
|
|
}
|
|
|
|
std::unique_ptr<Frame> BufferedFrameDeserializer::PopNextFrame() {
|
|
if (decoded_frames_.empty())
|
|
return nullptr;
|
|
std::unique_ptr<Frame> frame = std::move(decoded_frames_.front());
|
|
decoded_frames_.pop_front();
|
|
return frame;
|
|
}
|
|
|
|
void BufferedFrameDeserializer::DecodeFrame(const char* data, size_t size) {
|
|
if (size == 0)
|
|
return;
|
|
std::unique_ptr<Frame> frame(new Frame);
|
|
if (frame->ParseFromArray(data, size))
|
|
decoded_frames_.push_back(std::move(frame));
|
|
}
|
|
|
|
// static
|
|
std::string BufferedFrameDeserializer::Serialize(const Frame& frame) {
|
|
std::vector<uint8_t> payload = frame.SerializeAsArray();
|
|
const uint32_t payload_size = static_cast<uint32_t>(payload.size());
|
|
std::string buf;
|
|
buf.resize(kHeaderSize + payload_size);
|
|
memcpy(&buf[0], base::AssumeLittleEndian(&payload_size), kHeaderSize);
|
|
memcpy(&buf[kHeaderSize], payload.data(), payload.size());
|
|
return buf;
|
|
}
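
// Worked example (illustrative): if frame.SerializeAsArray() yields the two
// payload bytes [AA BB], Serialize() returns the 6-byte string
//   [02 00 00 00] [AA BB]
// i.e. the payload size as a little-endian uint32 header (kHeaderSize == 4)
// followed by the payload itself, matching the wire format documented in
// buffered_frame_deserializer.h.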
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/ipc/deferred.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/deferred.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/async_result.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_ASYNC_RESULT_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_ASYNC_RESULT_H_
|
|
|
|
#include <memory>
|
|
#include <type_traits>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// Wraps the result of an asynchronous invocation. This is the equivalent of a
|
|
// std::pair<unique_ptr<T>, bool> with syntactic sugar. It is used as the
// callback argument by Deferred<T>. T is a ProtoMessage subclass (i.e. a
// generated protobuf message class).
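//
// A minimal usage sketch (illustrative; HelloReply and its set_greeting()
// accessor are hypothetical generated names):
//
//   AsyncResult<HelloReply> result = AsyncResult<HelloReply>::Create();
//   result->set_greeting("hi");       // operator-> forwards to the message.
//   result.set_has_more(false);       // No further replies will follow.
//   // result.success() is true because the wrapped message is non-null.
//
// A default-constructed AsyncResult has a null message and converts to false,
// which is how a failure is signalled to the callback.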
|
|
template <typename T>
|
|
class AsyncResult {
|
|
public:
|
|
static AsyncResult Create() {
|
|
return AsyncResult(std::unique_ptr<T>(new T()));
|
|
}
|
|
|
|
AsyncResult(std::unique_ptr<T> msg = nullptr,
|
|
bool has_more = false,
|
|
int fd = -1)
|
|
: msg_(std::move(msg)), has_more_(has_more), fd_(fd) {
|
|
static_assert(std::is_base_of<ProtoMessage, T>::value, "T->ProtoMessage");
|
|
}
|
|
AsyncResult(AsyncResult&&) noexcept = default;
|
|
AsyncResult& operator=(AsyncResult&&) = default;
|
|
|
|
bool success() const { return !!msg_; }
|
|
explicit operator bool() const { return success(); }
|
|
|
|
bool has_more() const { return has_more_; }
|
|
void set_has_more(bool has_more) { has_more_ = has_more; }
|
|
|
|
void set_msg(std::unique_ptr<T> msg) { msg_ = std::move(msg); }
|
|
T* release_msg() { return msg_.release(); }
|
|
T* operator->() { return msg_.get(); }
|
|
T& operator*() { return *msg_; }
|
|
|
|
void set_fd(int fd) { fd_ = fd; }
|
|
int fd() const { return fd_; }
|
|
|
|
private:
|
|
std::unique_ptr<T> msg_;
|
|
bool has_more_ = false;
|
|
|
|
// Optional. Only for messages that convey a file descriptor, for sharing
|
|
// memory across processes.
|
|
int fd_ = -1;
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_ASYNC_RESULT_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_DEFERRED_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_DEFERRED_H_
|
|
|
|
#include <functional>
|
|
#include <memory>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/async_result.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// This class is a wrapper for a callback handling async results.
|
|
// The problem this is solving is the following: For each result argument of the
|
|
// methods generated from the .proto file:
|
|
// - The client wants to see something on which it can Bind() a callback, which
//   is invoked asynchronously once a reply is received from the host.
// - The host wants to expose something to the user code that implements the
//   IPC methods, allowing it to provide an asynchronous reply back to the
//   client, possibly more than once in the case of streaming replies.
|
|
//
|
|
// In both cases we want to make sure that callbacks don't get lost along the
|
|
// way. To address this, this class will automatically reject the callback
// if it has not been resolved by the time the object is destroyed (or another
// deferred object is std::move()'d onto it).
|
|
//
|
|
// The client is supposed to use this class as follows:
|
|
// class GreeterProxy {
|
|
// void SayHello(const HelloRequest&, Deferred<HelloReply> reply)
|
|
// }
|
|
// ...
|
|
// Deferred<HelloReply> reply;
|
|
//  reply.Bind([] (AsyncResult<HelloReply> result) {
//    std::cout << (result.success() ? result->message() : "failure");
|
|
// });
|
|
// host_proxy_instance.SayHello(req, std::move(reply));
|
|
//
|
|
// The host instead is supposed to use this as follows:
|
|
// class GreeterImpl : public Greeter {
|
|
// void SayHello(const HelloRequest& req, Deferred<HelloReply> reply) {
|
|
//        AsyncResult<HelloReply> result = AsyncResult<HelloReply>::Create();
//        result->set_greeting("Hello " + req.name());
//        reply.Resolve(std::move(result));
|
|
// }
|
|
// }
|
|
// Or for more complex cases, the deferred object can be std::move()'d outside
|
|
// and the reply can continue asynchronously later.
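// For a streaming reply, a host implementation might look as follows (a
// sketch; EventsRequest, EventsReply, set_event() and pending_reply_ are
// hypothetical names):
//
//   void StreamEvents(const EventsRequest&, Deferred<EventsReply> reply) {
//     pending_reply_ = std::move(reply);  // Kept as a class member.
//   }
//   void OnNewEvent(const std::string& evt) {
//     auto result = AsyncResult<EventsReply>::Create();
//     result->set_event(evt);
//     result.set_has_more(true);  // Keeps the callback bound for more replies.
//     pending_reply_.Resolve(std::move(result));
//   }
//
// The final reply should be sent with has_more == false (the default), which
// releases the callback.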
|
|
|
|
template <typename T>
|
|
class Deferred;
|
|
|
|
class DeferredBase {
|
|
public:
|
|
explicit DeferredBase(
|
|
std::function<void(AsyncResult<ProtoMessage>)> callback = nullptr);
|
|
|
|
template <typename T>
|
|
explicit DeferredBase(Deferred<T> other)
|
|
: callback_(std::move(other.callback_)) {}
|
|
|
|
~DeferredBase();
|
|
DeferredBase(DeferredBase&&) noexcept;
|
|
DeferredBase& operator=(DeferredBase&&);
|
|
void Bind(std::function<void(AsyncResult<ProtoMessage>)> callback);
|
|
bool IsBound() const;
|
|
void Resolve(AsyncResult<ProtoMessage>);
|
|
void Reject();
|
|
|
|
protected:
|
|
template <typename T>
|
|
friend class Deferred;
|
|
void Move(DeferredBase&);
|
|
|
|
std::function<void(AsyncResult<ProtoMessage>)> callback_;
|
|
};
|
|
|
|
template <typename T> // T : ProtoMessage subclass
|
|
class Deferred : public DeferredBase {
|
|
public:
|
|
explicit Deferred(std::function<void(AsyncResult<T>)> callback = nullptr) {
|
|
Bind(std::move(callback));
|
|
}
|
|
|
|
// This move constructor (and the similar one in DeferredBase) is meant to be
|
|
// called only by the autogenerated code. The caller has to guarantee that the
|
|
// moved-from and moved-to types match. The behavior is otherwise undefined.
|
|
explicit Deferred(DeferredBase&& other) {
|
|
callback_ = std::move(other.callback_);
|
|
other.callback_ = nullptr;
|
|
}
|
|
|
|
void Bind(std::function<void(AsyncResult<T>)> callback) {
|
|
if (!callback)
|
|
return;
|
|
|
|
// Here we need a callback adapter to downcast the callback to a generic
|
|
// callback that takes an AsyncResult<ProtoMessage>, so that it can be
|
|
// stored in the base class |callback_|.
|
|
auto callback_adapter = [callback](
|
|
AsyncResult<ProtoMessage> async_result_base) {
|
|
// Upcast the async_result from <ProtoMessage> -> <T : ProtoMessage>.
|
|
static_assert(std::is_base_of<ProtoMessage, T>::value, "T:ProtoMessage");
|
|
AsyncResult<T> async_result(
|
|
std::unique_ptr<T>(static_cast<T*>(async_result_base.release_msg())),
|
|
async_result_base.has_more(), async_result_base.fd());
|
|
callback(std::move(async_result));
|
|
};
|
|
DeferredBase::Bind(callback_adapter);
|
|
}
|
|
|
|
// If no more messages are expected, |callback_| is released.
|
|
void Resolve(AsyncResult<T> async_result) {
|
|
// Convert the |async_result| to the generic base one (T -> ProtoMessage).
|
|
AsyncResult<ProtoMessage> async_result_base(
|
|
std::unique_ptr<ProtoMessage>(async_result.release_msg()),
|
|
async_result.has_more(), async_result.fd());
|
|
DeferredBase::Resolve(std::move(async_result_base));
|
|
}
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_DEFERRED_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
DeferredBase::DeferredBase(
|
|
std::function<void(AsyncResult<ProtoMessage>)> callback)
|
|
: callback_(std::move(callback)) {}
|
|
|
|
DeferredBase::~DeferredBase() {
|
|
if (callback_)
|
|
Reject();
|
|
}
|
|
|
|
// Can't just use "= default" here because the default move operator for
|
|
// std::function doesn't necessarily swap and hence can leave a copy of the
|
|
// bind state around, which is undesirable.
|
|
DeferredBase::DeferredBase(DeferredBase&& other) noexcept {
|
|
Move(other);
|
|
}
|
|
|
|
DeferredBase& DeferredBase::operator=(DeferredBase&& other) {
|
|
if (callback_)
|
|
Reject();
|
|
Move(other);
|
|
return *this;
|
|
}
|
|
|
|
void DeferredBase::Move(DeferredBase& other) {
|
|
callback_ = std::move(other.callback_);
|
|
other.callback_ = nullptr;
|
|
}
|
|
|
|
void DeferredBase::Bind(
|
|
std::function<void(AsyncResult<ProtoMessage>)> callback) {
|
|
callback_ = std::move(callback);
|
|
}
|
|
|
|
bool DeferredBase::IsBound() const {
|
|
return !!callback_;
|
|
}
|
|
|
|
void DeferredBase::Resolve(AsyncResult<ProtoMessage> async_result) {
|
|
if (!callback_) {
|
|
PERFETTO_DFATAL("No callback set.");
|
|
return;
|
|
}
|
|
bool has_more = async_result.has_more();
|
|
callback_(std::move(async_result));
|
|
if (!has_more)
|
|
callback_ = nullptr;
|
|
}
|
|
|
|
// Resolves with a nullptr |msg_|, signalling failure to |callback_|.
|
|
void DeferredBase::Reject() {
|
|
Resolve(AsyncResult<ProtoMessage>());
|
|
}
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/ipc/virtual_destructors.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/client.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_CLIENT_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_CLIENT_H_
|
|
|
|
#include <functional>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
namespace ipc {
|
|
class ServiceProxy;
|
|
|
|
// The client-side class that talks to the host over the socket and multiplexes
|
|
// requests coming from the various autogenerated ServiceProxy stubs.
|
|
// This is meant to be used by the user code as follows:
|
|
// auto client = Client::CreateInstance("socket_name", task_runner);
|
|
// std::unique_ptr<GreeterService> svc(new GreeterService());
|
|
// client.BindService(svc);
|
|
// svc.OnConnect([] () {
|
|
// svc.SayHello(..., ...);
|
|
// });
|
|
class Client {
|
|
public:
|
|
static std::unique_ptr<Client> CreateInstance(const char* socket_name,
|
|
bool socket_retry,
|
|
base::TaskRunner*);
|
|
virtual ~Client();
|
|
|
|
virtual void BindService(base::WeakPtr<ServiceProxy>) = 0;
|
|
|
|
// There is no need to call this method explicitly. Destroying the
|
|
// ServiceProxy instance is sufficient and will automatically unbind it. This
|
|
// method is exposed only for the ServiceProxy destructor.
|
|
virtual void UnbindService(ServiceID) = 0;
|
|
|
|
// Returns (with move semantics) the last file descriptor received on the IPC
|
|
// channel. No buffering is performed: if a service sends two file descriptors
|
|
// and the caller doesn't read them immediately, the first one will be
|
|
// automatically closed when the second is received (and will hit a DCHECK in
|
|
// debug builds).
|
|
virtual base::ScopedFile TakeReceivedFD() = 0;
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_CLIENT_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/host.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_HOST_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_HOST_H_
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
namespace ipc {
|
|
|
|
class Service;
|
|
|
|
// The host-side of the IPC layer. This class acts as a registry and request
|
|
// dispatcher. It listen on the UnixSocket |socket_name| for incoming requests
|
|
// (coming Client instances) and dispatches their requests to the various
|
|
// Services exposed.
|
|
class Host {
|
|
public:
|
|
// Creates an instance and starts listening on the given |socket_name|.
|
|
// Returns nullptr if listening on the socket fails.
|
|
static std::unique_ptr<Host> CreateInstance(const char* socket_name,
|
|
base::TaskRunner*);
|
|
|
|
// Like the above but takes a file descriptor to a pre-bound unix socket.
|
|
// Returns nullptr if listening on the socket fails.
|
|
static std::unique_ptr<Host> CreateInstance(base::ScopedFile socket_fd,
|
|
base::TaskRunner*);
|
|
|
|
virtual ~Host();
|
|
|
|
// Registers a new service and makes it available to remote IPC peers.
|
|
// All the exposed Service instances will be destroyed when destroying the
|
|
// Host instance if ExposeService succeeds and returns true, or immediately
|
|
// after the call in case of failure.
|
|
// Returns true if the register has been successfully registered, false in
|
|
// case of errors (e.g., another service with the same name is already
|
|
// registered).
|
|
virtual bool ExposeService(std::unique_ptr<Service>) = 0;
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_HOST_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/service.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/client_info.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_CLIENT_INFO_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_CLIENT_INFO_H_
|
|
|
|
#include <unistd.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// Passed to Service(s) to identify remote clients.
|
|
class ClientInfo {
|
|
public:
|
|
ClientInfo() = default;
|
|
ClientInfo(ClientID client_id, uid_t uid)
|
|
: client_id_(client_id), uid_(uid) {}
|
|
|
|
bool operator==(const ClientInfo& other) const {
|
|
return (client_id_ == other.client_id_ && uid_ == other.uid_);
|
|
}
|
|
bool operator!=(const ClientInfo& other) const { return !(*this == other); }
|
|
|
|
// For map<> and other sorted containers.
|
|
bool operator<(const ClientInfo& other) const {
|
|
PERFETTO_DCHECK(client_id_ != other.client_id_ || *this == other);
|
|
return client_id_ < other.client_id_;
|
|
}
|
|
|
|
bool is_valid() const { return client_id_ != 0; }
|
|
|
|
// A monotonic counter.
|
|
ClientID client_id() const { return client_id_; }
|
|
|
|
// Posix User ID. Comes from the kernel, can be trusted.
|
|
uid_t uid() const { return uid_; }
|
|
|
|
private:
|
|
ClientID client_id_ = 0;
|
|
uid_t uid_ = kInvalidUid;
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_CLIENT_INFO_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_SERVICE_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_SERVICE_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client_info.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
class ServiceDescriptor;
|
|
|
|
// The base class for all the autogenerated host-side service interfaces.
|
|
class Service {
|
|
public:
|
|
virtual ~Service();
|
|
|
|
// Overridden by the auto-generated class. Provides the list of methods and
|
|
// the protobuf (de)serialization functions for their arguments.
|
|
virtual const ServiceDescriptor& GetDescriptor() = 0;
|
|
|
|
// Invoked when a remote client disconnects. Use client_info() to obtain
|
|
// details about the client that disconnected.
|
|
virtual void OnClientDisconnected() {}
|
|
|
|
// Returns the ClientInfo for the current IPC request. Returns an invalid
|
|
// ClientInfo if called outside the scope of an IPC method.
|
|
const ClientInfo& client_info() {
|
|
PERFETTO_DCHECK(client_info_.is_valid());
|
|
return client_info_;
|
|
}
|
|
|
|
base::ScopedFile TakeReceivedFD() {
|
|
if (received_fd_)
|
|
return std::move(*received_fd_);
|
|
return base::ScopedFile();
|
|
}
|
|
|
|
private:
|
|
friend class HostImpl;
|
|
ClientInfo client_info_;
|
|
// This is a pointer because the received fd needs to remain owned by the
|
|
// ClientConnection, as we will provide it to all method invocations
|
|
// for that client until one of them calls Service::TakeReceivedFD.
|
|
//
|
|
// Different clients might have sent different FDs so this cannot be owned
|
|
// here.
|
|
//
|
|
// Note that this means that there can always only be one outstanding
|
|
// invocation per client that supplies an FD and the client needs to
|
|
// wait for this one to return before calling another one.
|
|
base::ScopedFile* received_fd_;
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_SERVICE_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/service_proxy.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_SERVICE_PROXY_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_SERVICE_PROXY_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
#include <assert.h>
|
|
|
|
#include <functional>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
class Client;
|
|
class ServiceDescriptor;
|
|
|
|
// The base class for the client-side autogenerated stubs that forward method
|
|
// invocations to the host. All the methods of this class are meant to be called
|
|
// only by the autogenerated code.
|
|
class PERFETTO_EXPORT ServiceProxy {
|
|
public:
|
|
class EventListener {
|
|
public:
|
|
virtual ~EventListener();
|
|
|
|
// Called once after Client::BindService() if the ServiceProxy has been
|
|
// successfully bound to the host. It is possible to start sending IPC
|
|
// requests soon after this.
|
|
virtual void OnConnect() {}
|
|
|
|
// Called if the connection fails to be established or drops after having
|
|
// been established.
|
|
virtual void OnDisconnect() {}
|
|
};
|
|
|
|
// Guarantees that no callback will happen after this object has been
|
|
// destroyed. The caller has to guarantee that the |event_listener| stays
|
|
// alive at least as long as the ServiceProxy instance.
|
|
explicit ServiceProxy(EventListener*);
|
|
virtual ~ServiceProxy();
|
|
|
|
void InitializeBinding(base::WeakPtr<Client>,
|
|
ServiceID,
|
|
std::map<std::string, MethodID>);
|
|
|
|
// Called by the IPC methods in the autogenerated classes.
|
|
void BeginInvoke(const std::string& method_name,
|
|
const ProtoMessage& request,
|
|
DeferredBase reply,
|
|
int fd = -1);
|
|
|
|
// Called by ClientImpl.
|
|
// |reply_args| == nullptr means request failure.
|
|
void EndInvoke(RequestID,
|
|
std::unique_ptr<ProtoMessage> reply_arg,
|
|
bool has_more);
|
|
|
|
// Called by ClientImpl.
|
|
void OnConnect(bool success);
|
|
void OnDisconnect();
|
|
bool connected() const { return service_id_ != 0; }
|
|
|
|
base::WeakPtr<ServiceProxy> GetWeakPtr() const;
|
|
|
|
// Implemented by the autogenerated class.
|
|
virtual const ServiceDescriptor& GetDescriptor() = 0;
|
|
|
|
private:
|
|
base::WeakPtr<Client> client_;
|
|
ServiceID service_id_ = 0;
|
|
std::map<std::string, MethodID> remote_method_ids_;
|
|
std::map<RequestID, DeferredBase> pending_callbacks_;
|
|
EventListener* const event_listener_;
|
|
base::WeakPtrFactory<ServiceProxy> weak_ptr_factory_; // Keep last.
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_SERVICE_PROXY_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
|
|
// This translation unit contains the definitions for the destructor of pure
|
|
// virtual interfaces for the current build target. The alternative would be
|
|
// introducing a one-liner .cc file for each pure virtual interface, which is
|
|
// overkill. This is for compliance with -Wweak-vtables.
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
Client::~Client() = default;
|
|
Host::~Host() = default;
|
|
Service::~Service() = default;
|
|
ServiceProxy::EventListener::~EventListener() = default;
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: gen/protos/perfetto/ipc/consumer_port.ipc.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/ipc/consumer_port.ipc.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/service_descriptor.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_SERVICE_DESCRIPTOR_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_SERVICE_DESCRIPTOR_H_
|
|
|
|
#include <functional>
|
|
#include <string>
|
|
#include <utility>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
class Service;
|
|
|
|
// This is a pure data structure which holds factory methods and strings for the
|
|
// services and their methods that get generated in the .h/.cc files.
|
|
// Each autogenerated class has a GetDescriptor() method that returns one
|
|
// instance of these and allows both client and hosts to map service and method
|
|
// names to IDs and provide function pointers to the protobuf decoder fuctions.
|
|
class ServiceDescriptor {
|
|
public:
|
|
struct Method {
|
|
const char* name;
|
|
|
|
// DecoderFunc is pointer to a function that takes a string in input
|
|
// containing protobuf encoded data and returns a decoded protobuf message.
|
|
using DecoderFunc = std::unique_ptr<ProtoMessage> (*)(const std::string&);
|
|
|
|
// Function pointer to decode the request argument of the method.
|
|
DecoderFunc request_proto_decoder;
|
|
|
|
// Function pointer to decoded the reply argument of the method.
|
|
DecoderFunc reply_proto_decoder;
|
|
|
|
// Function pointer that dispatches the generic request to the corresponding
|
|
// method implementation.
|
|
using InvokerFunc = void (*)(Service*,
|
|
const ProtoMessage& /* request_args */,
|
|
DeferredBase /* deferred_reply */);
|
|
InvokerFunc invoker;
|
|
};
|
|
|
|
const char* service_name = nullptr;
|
|
|
|
// Note that methods order is not stable. Client and Host might have different
|
|
// method indexes, depending on their versions. The Client can't just rely
|
|
// on the indexes and has to keep a [string -> remote index] translation map.
|
|
std::vector<Method> methods;
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_SERVICE_DESCRIPTOR_H_
|
|
// DO NOT EDIT. Autogenerated by Perfetto IPC
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class ConsumerPort : public ::perfetto::ipc::Service {
|
|
private:
|
|
static ::perfetto::ipc::ServiceDescriptor* NewDescriptor();
|
|
|
|
public:
|
|
~ConsumerPort() override;
|
|
|
|
static const ::perfetto::ipc::ServiceDescriptor& GetDescriptorStatic();
|
|
|
|
// Service implementation.
|
|
const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;
|
|
|
|
// Methods from the .proto file
|
|
using DeferredEnableTracingResponse = ::perfetto::ipc::Deferred<EnableTracingResponse>;
|
|
virtual void EnableTracing(const EnableTracingRequest&, DeferredEnableTracingResponse) = 0;
|
|
|
|
using DeferredDisableTracingResponse = ::perfetto::ipc::Deferred<DisableTracingResponse>;
|
|
virtual void DisableTracing(const DisableTracingRequest&, DeferredDisableTracingResponse) = 0;
|
|
|
|
using DeferredReadBuffersResponse = ::perfetto::ipc::Deferred<ReadBuffersResponse>;
|
|
virtual void ReadBuffers(const ReadBuffersRequest&, DeferredReadBuffersResponse) = 0;
|
|
|
|
using DeferredFreeBuffersResponse = ::perfetto::ipc::Deferred<FreeBuffersResponse>;
|
|
virtual void FreeBuffers(const FreeBuffersRequest&, DeferredFreeBuffersResponse) = 0;
|
|
|
|
using DeferredFlushResponse = ::perfetto::ipc::Deferred<FlushResponse>;
|
|
virtual void Flush(const FlushRequest&, DeferredFlushResponse) = 0;
|
|
|
|
using DeferredStartTracingResponse = ::perfetto::ipc::Deferred<StartTracingResponse>;
|
|
virtual void StartTracing(const StartTracingRequest&, DeferredStartTracingResponse) = 0;
|
|
|
|
using DeferredChangeTraceConfigResponse = ::perfetto::ipc::Deferred<ChangeTraceConfigResponse>;
|
|
virtual void ChangeTraceConfig(const ChangeTraceConfigRequest&, DeferredChangeTraceConfigResponse) = 0;
|
|
|
|
using DeferredDetachResponse = ::perfetto::ipc::Deferred<DetachResponse>;
|
|
virtual void Detach(const DetachRequest&, DeferredDetachResponse) = 0;
|
|
|
|
using DeferredAttachResponse = ::perfetto::ipc::Deferred<AttachResponse>;
|
|
virtual void Attach(const AttachRequest&, DeferredAttachResponse) = 0;
|
|
|
|
using DeferredGetTraceStatsResponse = ::perfetto::ipc::Deferred<GetTraceStatsResponse>;
|
|
virtual void GetTraceStats(const GetTraceStatsRequest&, DeferredGetTraceStatsResponse) = 0;
|
|
|
|
using DeferredObserveEventsResponse = ::perfetto::ipc::Deferred<ObserveEventsResponse>;
|
|
virtual void ObserveEvents(const ObserveEventsRequest&, DeferredObserveEventsResponse) = 0;
|
|
|
|
using DeferredQueryServiceStateResponse = ::perfetto::ipc::Deferred<QueryServiceStateResponse>;
|
|
virtual void QueryServiceState(const QueryServiceStateRequest&, DeferredQueryServiceStateResponse) = 0;
|
|
|
|
using DeferredQueryCapabilitiesResponse = ::perfetto::ipc::Deferred<QueryCapabilitiesResponse>;
|
|
virtual void QueryCapabilities(const QueryCapabilitiesRequest&, DeferredQueryCapabilitiesResponse) = 0;
|
|
|
|
};
|
|
|
|
|
|
class ConsumerPortProxy : public ::perfetto::ipc::ServiceProxy {
|
|
public:
|
|
explicit ConsumerPortProxy(::perfetto::ipc::ServiceProxy::EventListener*);
|
|
~ConsumerPortProxy() override;
|
|
|
|
// ServiceProxy implementation.
|
|
const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;
|
|
|
|
// Methods from the .proto file
|
|
using DeferredEnableTracingResponse = ::perfetto::ipc::Deferred<EnableTracingResponse>;
|
|
void EnableTracing(const EnableTracingRequest&, DeferredEnableTracingResponse, int fd = -1);
|
|
|
|
using DeferredDisableTracingResponse = ::perfetto::ipc::Deferred<DisableTracingResponse>;
|
|
void DisableTracing(const DisableTracingRequest&, DeferredDisableTracingResponse, int fd = -1);
|
|
|
|
using DeferredReadBuffersResponse = ::perfetto::ipc::Deferred<ReadBuffersResponse>;
|
|
void ReadBuffers(const ReadBuffersRequest&, DeferredReadBuffersResponse, int fd = -1);
|
|
|
|
using DeferredFreeBuffersResponse = ::perfetto::ipc::Deferred<FreeBuffersResponse>;
|
|
void FreeBuffers(const FreeBuffersRequest&, DeferredFreeBuffersResponse, int fd = -1);
|
|
|
|
using DeferredFlushResponse = ::perfetto::ipc::Deferred<FlushResponse>;
|
|
void Flush(const FlushRequest&, DeferredFlushResponse, int fd = -1);
|
|
|
|
using DeferredStartTracingResponse = ::perfetto::ipc::Deferred<StartTracingResponse>;
|
|
void StartTracing(const StartTracingRequest&, DeferredStartTracingResponse, int fd = -1);
|
|
|
|
using DeferredChangeTraceConfigResponse = ::perfetto::ipc::Deferred<ChangeTraceConfigResponse>;
|
|
void ChangeTraceConfig(const ChangeTraceConfigRequest&, DeferredChangeTraceConfigResponse, int fd = -1);
|
|
|
|
using DeferredDetachResponse = ::perfetto::ipc::Deferred<DetachResponse>;
|
|
void Detach(const DetachRequest&, DeferredDetachResponse, int fd = -1);
|
|
|
|
using DeferredAttachResponse = ::perfetto::ipc::Deferred<AttachResponse>;
|
|
void Attach(const AttachRequest&, DeferredAttachResponse, int fd = -1);
|
|
|
|
using DeferredGetTraceStatsResponse = ::perfetto::ipc::Deferred<GetTraceStatsResponse>;
|
|
void GetTraceStats(const GetTraceStatsRequest&, DeferredGetTraceStatsResponse, int fd = -1);
|
|
|
|
using DeferredObserveEventsResponse = ::perfetto::ipc::Deferred<ObserveEventsResponse>;
|
|
void ObserveEvents(const ObserveEventsRequest&, DeferredObserveEventsResponse, int fd = -1);
|
|
|
|
using DeferredQueryServiceStateResponse = ::perfetto::ipc::Deferred<QueryServiceStateResponse>;
|
|
void QueryServiceState(const QueryServiceStateRequest&, DeferredQueryServiceStateResponse, int fd = -1);
|
|
|
|
using DeferredQueryCapabilitiesResponse = ::perfetto::ipc::Deferred<QueryCapabilitiesResponse>;
|
|
void QueryCapabilities(const QueryCapabilitiesRequest&, DeferredQueryCapabilitiesResponse, int fd = -1);
|
|
|
|
};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/codegen_helpers.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// This file is only meant to be included in autogenerated .cc files.
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_CODEGEN_HELPERS_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_CODEGEN_HELPERS_H_
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
|
|
// A templated protobuf message decoder. Returns nullptr in case of failure.
|
|
template <typename T>
|
|
::std::unique_ptr<::perfetto::ipc::ProtoMessage> _IPC_Decoder(
|
|
const std::string& proto_data) {
|
|
::std::unique_ptr<::perfetto::ipc::ProtoMessage> msg(new T());
|
|
if (msg->ParseFromString(proto_data))
|
|
return msg;
|
|
return nullptr;
|
|
}
|
|
|
|
// Templated method dispatcher. Used to obtain a function pointer to a given
|
|
// IPC method (Method) of a given service (TSvc) that can be invoked by the
|
|
// host-side machinery starting from a generic Service pointer and a generic
|
|
// ProtoMessage request argument.
|
|
template <typename TSvc, // Type of the actual Service subclass.
|
|
typename TReq, // Type of the request argument.
|
|
typename TReply, // Type of the reply argument.
|
|
void (TSvc::*Method)(const TReq&, ::perfetto::ipc::Deferred<TReply>)>
|
|
void _IPC_Invoker(::perfetto::ipc::Service* s,
|
|
const ::perfetto::ipc::ProtoMessage& req,
|
|
::perfetto::ipc::DeferredBase reply) {
|
|
(*static_cast<TSvc*>(s).*Method)(
|
|
static_cast<const TReq&>(req),
|
|
::perfetto::ipc::Deferred<TReply>(::std::move(reply)));
|
|
}
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_CODEGEN_HELPERS_H_
|
|
// DO NOT EDIT. Autogenerated by Perfetto IPC
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.ipc.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/codegen_helpers.h"
|
|
|
|
#include <memory>
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
::perfetto::ipc::ServiceDescriptor* ConsumerPort::NewDescriptor() {
|
|
auto* desc = new ::perfetto::ipc::ServiceDescriptor();
|
|
desc->service_name = "ConsumerPort";
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"EnableTracing",
|
|
&_IPC_Decoder<EnableTracingRequest>,
|
|
&_IPC_Decoder<EnableTracingResponse>,
|
|
&_IPC_Invoker<ConsumerPort, EnableTracingRequest, EnableTracingResponse, &ConsumerPort::EnableTracing>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"DisableTracing",
|
|
&_IPC_Decoder<DisableTracingRequest>,
|
|
&_IPC_Decoder<DisableTracingResponse>,
|
|
&_IPC_Invoker<ConsumerPort, DisableTracingRequest, DisableTracingResponse, &ConsumerPort::DisableTracing>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"ReadBuffers",
|
|
&_IPC_Decoder<ReadBuffersRequest>,
|
|
&_IPC_Decoder<ReadBuffersResponse>,
|
|
&_IPC_Invoker<ConsumerPort, ReadBuffersRequest, ReadBuffersResponse, &ConsumerPort::ReadBuffers>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"FreeBuffers",
|
|
&_IPC_Decoder<FreeBuffersRequest>,
|
|
&_IPC_Decoder<FreeBuffersResponse>,
|
|
&_IPC_Invoker<ConsumerPort, FreeBuffersRequest, FreeBuffersResponse, &ConsumerPort::FreeBuffers>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"Flush",
|
|
&_IPC_Decoder<FlushRequest>,
|
|
&_IPC_Decoder<FlushResponse>,
|
|
&_IPC_Invoker<ConsumerPort, FlushRequest, FlushResponse, &ConsumerPort::Flush>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"StartTracing",
|
|
&_IPC_Decoder<StartTracingRequest>,
|
|
&_IPC_Decoder<StartTracingResponse>,
|
|
&_IPC_Invoker<ConsumerPort, StartTracingRequest, StartTracingResponse, &ConsumerPort::StartTracing>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"ChangeTraceConfig",
|
|
&_IPC_Decoder<ChangeTraceConfigRequest>,
|
|
&_IPC_Decoder<ChangeTraceConfigResponse>,
|
|
&_IPC_Invoker<ConsumerPort, ChangeTraceConfigRequest, ChangeTraceConfigResponse, &ConsumerPort::ChangeTraceConfig>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"Detach",
|
|
&_IPC_Decoder<DetachRequest>,
|
|
&_IPC_Decoder<DetachResponse>,
|
|
&_IPC_Invoker<ConsumerPort, DetachRequest, DetachResponse, &ConsumerPort::Detach>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"Attach",
|
|
&_IPC_Decoder<AttachRequest>,
|
|
&_IPC_Decoder<AttachResponse>,
|
|
&_IPC_Invoker<ConsumerPort, AttachRequest, AttachResponse, &ConsumerPort::Attach>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"GetTraceStats",
|
|
&_IPC_Decoder<GetTraceStatsRequest>,
|
|
&_IPC_Decoder<GetTraceStatsResponse>,
|
|
&_IPC_Invoker<ConsumerPort, GetTraceStatsRequest, GetTraceStatsResponse, &ConsumerPort::GetTraceStats>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"ObserveEvents",
|
|
&_IPC_Decoder<ObserveEventsRequest>,
|
|
&_IPC_Decoder<ObserveEventsResponse>,
|
|
&_IPC_Invoker<ConsumerPort, ObserveEventsRequest, ObserveEventsResponse, &ConsumerPort::ObserveEvents>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"QueryServiceState",
|
|
&_IPC_Decoder<QueryServiceStateRequest>,
|
|
&_IPC_Decoder<QueryServiceStateResponse>,
|
|
&_IPC_Invoker<ConsumerPort, QueryServiceStateRequest, QueryServiceStateResponse, &ConsumerPort::QueryServiceState>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"QueryCapabilities",
|
|
&_IPC_Decoder<QueryCapabilitiesRequest>,
|
|
&_IPC_Decoder<QueryCapabilitiesResponse>,
|
|
&_IPC_Invoker<ConsumerPort, QueryCapabilitiesRequest, QueryCapabilitiesResponse, &ConsumerPort::QueryCapabilities>});
|
|
desc->methods.shrink_to_fit();
|
|
return desc;
|
|
}
|
|
|
|
|
|
const ::perfetto::ipc::ServiceDescriptor& ConsumerPort::GetDescriptorStatic() {
|
|
static auto* instance = NewDescriptor();
|
|
return *instance;
|
|
}
|
|
|
|
// Host-side definitions.
|
|
ConsumerPort::~ConsumerPort() = default;
|
|
|
|
const ::perfetto::ipc::ServiceDescriptor& ConsumerPort::GetDescriptor() {
|
|
return GetDescriptorStatic();
|
|
}
|
|
|
|
// Client-side definitions.
|
|
ConsumerPortProxy::ConsumerPortProxy(::perfetto::ipc::ServiceProxy::EventListener* event_listener)
|
|
: ::perfetto::ipc::ServiceProxy(event_listener) {}
|
|
|
|
ConsumerPortProxy::~ConsumerPortProxy() = default;
|
|
|
|
const ::perfetto::ipc::ServiceDescriptor& ConsumerPortProxy::GetDescriptor() {
|
|
return ConsumerPort::GetDescriptorStatic();
|
|
}
|
|
|
|
void ConsumerPortProxy::EnableTracing(const EnableTracingRequest& request, DeferredEnableTracingResponse reply, int fd) {
|
|
BeginInvoke("EnableTracing", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::DisableTracing(const DisableTracingRequest& request, DeferredDisableTracingResponse reply, int fd) {
|
|
BeginInvoke("DisableTracing", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::ReadBuffers(const ReadBuffersRequest& request, DeferredReadBuffersResponse reply, int fd) {
|
|
BeginInvoke("ReadBuffers", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::FreeBuffers(const FreeBuffersRequest& request, DeferredFreeBuffersResponse reply, int fd) {
|
|
BeginInvoke("FreeBuffers", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::Flush(const FlushRequest& request, DeferredFlushResponse reply, int fd) {
|
|
BeginInvoke("Flush", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::StartTracing(const StartTracingRequest& request, DeferredStartTracingResponse reply, int fd) {
|
|
BeginInvoke("StartTracing", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::ChangeTraceConfig(const ChangeTraceConfigRequest& request, DeferredChangeTraceConfigResponse reply, int fd) {
|
|
BeginInvoke("ChangeTraceConfig", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::Detach(const DetachRequest& request, DeferredDetachResponse reply, int fd) {
|
|
BeginInvoke("Detach", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::Attach(const AttachRequest& request, DeferredAttachResponse reply, int fd) {
|
|
BeginInvoke("Attach", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::GetTraceStats(const GetTraceStatsRequest& request, DeferredGetTraceStatsResponse reply, int fd) {
|
|
BeginInvoke("GetTraceStats", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::ObserveEvents(const ObserveEventsRequest& request, DeferredObserveEventsResponse reply, int fd) {
|
|
BeginInvoke("ObserveEvents", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::QueryServiceState(const QueryServiceStateRequest& request, DeferredQueryServiceStateResponse reply, int fd) {
|
|
BeginInvoke("QueryServiceState", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ConsumerPortProxy::QueryCapabilities(const QueryCapabilitiesRequest& request, DeferredQueryCapabilitiesResponse reply, int fd) {
|
|
BeginInvoke("QueryCapabilities", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
// gen_amalgamated begin source: gen/protos/perfetto/ipc/producer_port.ipc.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/ipc/producer_port.ipc.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto IPC
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
class ProducerPort : public ::perfetto::ipc::Service {
|
|
private:
|
|
static ::perfetto::ipc::ServiceDescriptor* NewDescriptor();
|
|
|
|
public:
|
|
~ProducerPort() override;
|
|
|
|
static const ::perfetto::ipc::ServiceDescriptor& GetDescriptorStatic();
|
|
|
|
// Service implementation.
|
|
const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;
|
|
|
|
// Methods from the .proto file
|
|
using DeferredInitializeConnectionResponse = ::perfetto::ipc::Deferred<InitializeConnectionResponse>;
|
|
virtual void InitializeConnection(const InitializeConnectionRequest&, DeferredInitializeConnectionResponse) = 0;
|
|
|
|
using DeferredRegisterDataSourceResponse = ::perfetto::ipc::Deferred<RegisterDataSourceResponse>;
|
|
virtual void RegisterDataSource(const RegisterDataSourceRequest&, DeferredRegisterDataSourceResponse) = 0;
|
|
|
|
using DeferredUnregisterDataSourceResponse = ::perfetto::ipc::Deferred<UnregisterDataSourceResponse>;
|
|
virtual void UnregisterDataSource(const UnregisterDataSourceRequest&, DeferredUnregisterDataSourceResponse) = 0;
|
|
|
|
using DeferredCommitDataResponse = ::perfetto::ipc::Deferred<CommitDataResponse>;
|
|
virtual void CommitData(const CommitDataRequest&, DeferredCommitDataResponse) = 0;
|
|
|
|
using DeferredGetAsyncCommandResponse = ::perfetto::ipc::Deferred<GetAsyncCommandResponse>;
|
|
virtual void GetAsyncCommand(const GetAsyncCommandRequest&, DeferredGetAsyncCommandResponse) = 0;
|
|
|
|
using DeferredRegisterTraceWriterResponse = ::perfetto::ipc::Deferred<RegisterTraceWriterResponse>;
|
|
virtual void RegisterTraceWriter(const RegisterTraceWriterRequest&, DeferredRegisterTraceWriterResponse) = 0;
|
|
|
|
using DeferredUnregisterTraceWriterResponse = ::perfetto::ipc::Deferred<UnregisterTraceWriterResponse>;
|
|
virtual void UnregisterTraceWriter(const UnregisterTraceWriterRequest&, DeferredUnregisterTraceWriterResponse) = 0;
|
|
|
|
using DeferredNotifyDataSourceStartedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStartedResponse>;
|
|
virtual void NotifyDataSourceStarted(const NotifyDataSourceStartedRequest&, DeferredNotifyDataSourceStartedResponse) = 0;
|
|
|
|
using DeferredNotifyDataSourceStoppedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStoppedResponse>;
|
|
virtual void NotifyDataSourceStopped(const NotifyDataSourceStoppedRequest&, DeferredNotifyDataSourceStoppedResponse) = 0;
|
|
|
|
using DeferredActivateTriggersResponse = ::perfetto::ipc::Deferred<ActivateTriggersResponse>;
|
|
virtual void ActivateTriggers(const ActivateTriggersRequest&, DeferredActivateTriggersResponse) = 0;
|
|
|
|
using DeferredSyncResponse = ::perfetto::ipc::Deferred<SyncResponse>;
|
|
virtual void Sync(const SyncRequest&, DeferredSyncResponse) = 0;
|
|
|
|
};
|
|
|
|
|
|
class ProducerPortProxy : public ::perfetto::ipc::ServiceProxy {
|
|
public:
|
|
explicit ProducerPortProxy(::perfetto::ipc::ServiceProxy::EventListener*);
|
|
~ProducerPortProxy() override;
|
|
|
|
// ServiceProxy implementation.
|
|
const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;
|
|
|
|
// Methods from the .proto file
|
|
using DeferredInitializeConnectionResponse = ::perfetto::ipc::Deferred<InitializeConnectionResponse>;
|
|
void InitializeConnection(const InitializeConnectionRequest&, DeferredInitializeConnectionResponse, int fd = -1);
|
|
|
|
using DeferredRegisterDataSourceResponse = ::perfetto::ipc::Deferred<RegisterDataSourceResponse>;
|
|
void RegisterDataSource(const RegisterDataSourceRequest&, DeferredRegisterDataSourceResponse, int fd = -1);
|
|
|
|
using DeferredUnregisterDataSourceResponse = ::perfetto::ipc::Deferred<UnregisterDataSourceResponse>;
|
|
void UnregisterDataSource(const UnregisterDataSourceRequest&, DeferredUnregisterDataSourceResponse, int fd = -1);
|
|
|
|
using DeferredCommitDataResponse = ::perfetto::ipc::Deferred<CommitDataResponse>;
|
|
void CommitData(const CommitDataRequest&, DeferredCommitDataResponse, int fd = -1);
|
|
|
|
using DeferredGetAsyncCommandResponse = ::perfetto::ipc::Deferred<GetAsyncCommandResponse>;
|
|
void GetAsyncCommand(const GetAsyncCommandRequest&, DeferredGetAsyncCommandResponse, int fd = -1);
|
|
|
|
using DeferredRegisterTraceWriterResponse = ::perfetto::ipc::Deferred<RegisterTraceWriterResponse>;
|
|
void RegisterTraceWriter(const RegisterTraceWriterRequest&, DeferredRegisterTraceWriterResponse, int fd = -1);
|
|
|
|
using DeferredUnregisterTraceWriterResponse = ::perfetto::ipc::Deferred<UnregisterTraceWriterResponse>;
|
|
void UnregisterTraceWriter(const UnregisterTraceWriterRequest&, DeferredUnregisterTraceWriterResponse, int fd = -1);
|
|
|
|
using DeferredNotifyDataSourceStartedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStartedResponse>;
|
|
void NotifyDataSourceStarted(const NotifyDataSourceStartedRequest&, DeferredNotifyDataSourceStartedResponse, int fd = -1);
|
|
|
|
using DeferredNotifyDataSourceStoppedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStoppedResponse>;
|
|
void NotifyDataSourceStopped(const NotifyDataSourceStoppedRequest&, DeferredNotifyDataSourceStoppedResponse, int fd = -1);
|
|
|
|
using DeferredActivateTriggersResponse = ::perfetto::ipc::Deferred<ActivateTriggersResponse>;
|
|
void ActivateTriggers(const ActivateTriggersRequest&, DeferredActivateTriggersResponse, int fd = -1);
|
|
|
|
using DeferredSyncResponse = ::perfetto::ipc::Deferred<SyncResponse>;
|
|
void Sync(const SyncRequest&, DeferredSyncResponse, int fd = -1);
|
|
|
|
};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_H_
|
|
// DO NOT EDIT. Autogenerated by Perfetto IPC
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.ipc.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/codegen_helpers.h"
|
|
|
|
#include <memory>
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
::perfetto::ipc::ServiceDescriptor* ProducerPort::NewDescriptor() {
|
|
auto* desc = new ::perfetto::ipc::ServiceDescriptor();
|
|
desc->service_name = "ProducerPort";
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"InitializeConnection",
|
|
&_IPC_Decoder<InitializeConnectionRequest>,
|
|
&_IPC_Decoder<InitializeConnectionResponse>,
|
|
&_IPC_Invoker<ProducerPort, InitializeConnectionRequest, InitializeConnectionResponse, &ProducerPort::InitializeConnection>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"RegisterDataSource",
|
|
&_IPC_Decoder<RegisterDataSourceRequest>,
|
|
&_IPC_Decoder<RegisterDataSourceResponse>,
|
|
&_IPC_Invoker<ProducerPort, RegisterDataSourceRequest, RegisterDataSourceResponse, &ProducerPort::RegisterDataSource>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"UnregisterDataSource",
|
|
&_IPC_Decoder<UnregisterDataSourceRequest>,
|
|
&_IPC_Decoder<UnregisterDataSourceResponse>,
|
|
&_IPC_Invoker<ProducerPort, UnregisterDataSourceRequest, UnregisterDataSourceResponse, &ProducerPort::UnregisterDataSource>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"CommitData",
|
|
&_IPC_Decoder<CommitDataRequest>,
|
|
&_IPC_Decoder<CommitDataResponse>,
|
|
&_IPC_Invoker<ProducerPort, CommitDataRequest, CommitDataResponse, &ProducerPort::CommitData>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"GetAsyncCommand",
|
|
&_IPC_Decoder<GetAsyncCommandRequest>,
|
|
&_IPC_Decoder<GetAsyncCommandResponse>,
|
|
&_IPC_Invoker<ProducerPort, GetAsyncCommandRequest, GetAsyncCommandResponse, &ProducerPort::GetAsyncCommand>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"RegisterTraceWriter",
|
|
&_IPC_Decoder<RegisterTraceWriterRequest>,
|
|
&_IPC_Decoder<RegisterTraceWriterResponse>,
|
|
&_IPC_Invoker<ProducerPort, RegisterTraceWriterRequest, RegisterTraceWriterResponse, &ProducerPort::RegisterTraceWriter>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"UnregisterTraceWriter",
|
|
&_IPC_Decoder<UnregisterTraceWriterRequest>,
|
|
&_IPC_Decoder<UnregisterTraceWriterResponse>,
|
|
&_IPC_Invoker<ProducerPort, UnregisterTraceWriterRequest, UnregisterTraceWriterResponse, &ProducerPort::UnregisterTraceWriter>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"NotifyDataSourceStarted",
|
|
&_IPC_Decoder<NotifyDataSourceStartedRequest>,
|
|
&_IPC_Decoder<NotifyDataSourceStartedResponse>,
|
|
&_IPC_Invoker<ProducerPort, NotifyDataSourceStartedRequest, NotifyDataSourceStartedResponse, &ProducerPort::NotifyDataSourceStarted>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"NotifyDataSourceStopped",
|
|
&_IPC_Decoder<NotifyDataSourceStoppedRequest>,
|
|
&_IPC_Decoder<NotifyDataSourceStoppedResponse>,
|
|
&_IPC_Invoker<ProducerPort, NotifyDataSourceStoppedRequest, NotifyDataSourceStoppedResponse, &ProducerPort::NotifyDataSourceStopped>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"ActivateTriggers",
|
|
&_IPC_Decoder<ActivateTriggersRequest>,
|
|
&_IPC_Decoder<ActivateTriggersResponse>,
|
|
&_IPC_Invoker<ProducerPort, ActivateTriggersRequest, ActivateTriggersResponse, &ProducerPort::ActivateTriggers>});
|
|
|
|
desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
|
|
"Sync",
|
|
&_IPC_Decoder<SyncRequest>,
|
|
&_IPC_Decoder<SyncResponse>,
|
|
&_IPC_Invoker<ProducerPort, SyncRequest, SyncResponse, &ProducerPort::Sync>});
|
|
desc->methods.shrink_to_fit();
|
|
return desc;
|
|
}
|
|
|
|
|
|
const ::perfetto::ipc::ServiceDescriptor& ProducerPort::GetDescriptorStatic() {
|
|
static auto* instance = NewDescriptor();
|
|
return *instance;
|
|
}
|
|
|
|
// Host-side definitions.
|
|
ProducerPort::~ProducerPort() = default;
|
|
|
|
const ::perfetto::ipc::ServiceDescriptor& ProducerPort::GetDescriptor() {
|
|
return GetDescriptorStatic();
|
|
}
|
|
|
|
// Client-side definitions.
|
|
ProducerPortProxy::ProducerPortProxy(::perfetto::ipc::ServiceProxy::EventListener* event_listener)
|
|
: ::perfetto::ipc::ServiceProxy(event_listener) {}
|
|
|
|
ProducerPortProxy::~ProducerPortProxy() = default;
|
|
|
|
const ::perfetto::ipc::ServiceDescriptor& ProducerPortProxy::GetDescriptor() {
|
|
return ProducerPort::GetDescriptorStatic();
|
|
}
|
|
|
|
void ProducerPortProxy::InitializeConnection(const InitializeConnectionRequest& request, DeferredInitializeConnectionResponse reply, int fd) {
|
|
BeginInvoke("InitializeConnection", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ProducerPortProxy::RegisterDataSource(const RegisterDataSourceRequest& request, DeferredRegisterDataSourceResponse reply, int fd) {
|
|
BeginInvoke("RegisterDataSource", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ProducerPortProxy::UnregisterDataSource(const UnregisterDataSourceRequest& request, DeferredUnregisterDataSourceResponse reply, int fd) {
|
|
BeginInvoke("UnregisterDataSource", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ProducerPortProxy::CommitData(const CommitDataRequest& request, DeferredCommitDataResponse reply, int fd) {
|
|
BeginInvoke("CommitData", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ProducerPortProxy::GetAsyncCommand(const GetAsyncCommandRequest& request, DeferredGetAsyncCommandResponse reply, int fd) {
|
|
BeginInvoke("GetAsyncCommand", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ProducerPortProxy::RegisterTraceWriter(const RegisterTraceWriterRequest& request, DeferredRegisterTraceWriterResponse reply, int fd) {
|
|
BeginInvoke("RegisterTraceWriter", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ProducerPortProxy::UnregisterTraceWriter(const UnregisterTraceWriterRequest& request, DeferredUnregisterTraceWriterResponse reply, int fd) {
|
|
BeginInvoke("UnregisterTraceWriter", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ProducerPortProxy::NotifyDataSourceStarted(const NotifyDataSourceStartedRequest& request, DeferredNotifyDataSourceStartedResponse reply, int fd) {
|
|
BeginInvoke("NotifyDataSourceStarted", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ProducerPortProxy::NotifyDataSourceStopped(const NotifyDataSourceStoppedRequest& request, DeferredNotifyDataSourceStoppedResponse reply, int fd) {
|
|
BeginInvoke("NotifyDataSourceStopped", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ProducerPortProxy::ActivateTriggers(const ActivateTriggersRequest& request, DeferredActivateTriggersResponse reply, int fd) {
|
|
BeginInvoke("ActivateTriggers", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
|
|
void ProducerPortProxy::Sync(const SyncRequest& request, DeferredSyncResponse reply, int fd) {
|
|
BeginInvoke("Sync", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
|
|
fd);
|
|
}
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
// gen_amalgamated begin source: src/base/unix_socket.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/unix_socket.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_UNIX_SOCKET_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_UNIX_SOCKET_H_
|
|
|
|
#include <stdint.h>
|
|
#include <sys/types.h>
|
|
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
|
|
struct msghdr;
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
class TaskRunner;
|
|
|
|
// Use arbitrarily high values to avoid that some code accidentally ends up
|
|
// assuming that these enum values match the sysroot's SOCK_xxx defines rather
|
|
// than using GetSockType() / GetSockFamily().
|
|
enum class SockType { kStream = 100, kDgram, kSeqPacket };
|
|
enum class SockFamily { kUnix = 200, kInet, kInet6 };
|
|
|
|
// UnixSocketRaw is a basic wrapper around UNIX sockets. It exposes wrapper
|
|
// methods that take care of most common pitfalls (e.g., marking fd as
|
|
// O_CLOEXEC, avoiding SIGPIPE, properly handling partial writes). It is used as
|
|
// a building block for the more sophisticated UnixSocket class.
|
|
class UnixSocketRaw {
|
|
public:
|
|
// Creates a new unconnected unix socket.
|
|
static UnixSocketRaw CreateMayFail(SockFamily family, SockType type);
|
|
|
|
// Crates a pair of connected sockets.
|
|
static std::pair<UnixSocketRaw, UnixSocketRaw> CreatePair(SockFamily,
|
|
SockType);
|
|
|
|
// Creates an uninitialized unix socket.
|
|
UnixSocketRaw();
|
|
|
|
// Creates a unix socket adopting an existing file descriptor. This is
|
|
// typically used to inherit fds from init via environment variables.
|
|
UnixSocketRaw(ScopedFile, SockFamily, SockType);
|
|
|
|
~UnixSocketRaw() = default;
|
|
UnixSocketRaw(UnixSocketRaw&&) noexcept = default;
|
|
UnixSocketRaw& operator=(UnixSocketRaw&&) = default;
|
|
|
|
bool Bind(const std::string& socket_name);
|
|
bool Listen();
|
|
bool Connect(const std::string& socket_name);
|
|
bool SetTxTimeout(uint32_t timeout_ms);
|
|
bool SetRxTimeout(uint32_t timeout_ms);
|
|
void Shutdown();
|
|
void SetBlocking(bool);
|
|
bool IsBlocking() const;
|
|
void RetainOnExec();
|
|
SockType type() const { return type_; }
|
|
SockFamily family() const { return family_; }
|
|
int fd() const { return *fd_; }
|
|
explicit operator bool() const { return !!fd_; }
|
|
|
|
ScopedFile ReleaseFd() { return std::move(fd_); }
|
|
|
|
ssize_t Send(const void* msg,
|
|
size_t len,
|
|
const int* send_fds = nullptr,
|
|
size_t num_fds = 0);
|
|
|
|
// Re-enter sendmsg until all the data has been sent or an error occurs.
|
|
// TODO(fmayer): Figure out how to do timeouts here for heapprofd.
|
|
ssize_t SendMsgAll(struct msghdr* msg);
|
|
|
|
ssize_t Receive(void* msg,
|
|
size_t len,
|
|
ScopedFile* fd_vec = nullptr,
|
|
size_t max_files = 0);
|
|
|
|
// Exposed for testing only.
|
|
// Update msghdr so subsequent sendmsg will send data that remains after n
|
|
// bytes have already been sent.
|
|
static void ShiftMsgHdr(size_t n, struct msghdr* msg);
|
|
|
|
private:
|
|
UnixSocketRaw(SockFamily, SockType);
|
|
|
|
UnixSocketRaw(const UnixSocketRaw&) = delete;
|
|
UnixSocketRaw& operator=(const UnixSocketRaw&) = delete;
|
|
|
|
ScopedFile fd_;
|
|
SockFamily family_ = SockFamily::kUnix;
|
|
SockType type_ = SockType::kStream;
|
|
};
|
|
|
|
// A non-blocking UNIX domain socket. Allows also to transfer file descriptors.
|
|
// None of the methods in this class are blocking.
|
|
// The main design goal is making strong guarantees on the EventListener
|
|
// callbacks, in order to avoid ending in some undefined state.
|
|
// In case of any error it will aggressively just shut down the socket and
|
|
// notify the failure with OnConnect(false) or OnDisconnect() depending on the
|
|
// state of the socket (see below).
|
|
// EventListener callbacks stop happening as soon as the instance is destroyed.
|
|
//
|
|
// Lifecycle of a client socket:
|
|
//
|
|
// Connect()
|
|
// |
|
|
// +------------------+------------------+
|
|
// | (success) | (failure or Shutdown())
|
|
// V V
|
|
// OnConnect(true) OnConnect(false)
|
|
// |
|
|
// V
|
|
// OnDataAvailable()
|
|
// |
|
|
// V
|
|
// OnDisconnect() (failure or shutdown)
|
|
//
|
|
//
|
|
// Lifecycle of a server socket:
|
|
//
|
|
// Listen() --> returns false in case of errors.
|
|
// |
|
|
// V
|
|
// OnNewIncomingConnection(new_socket)
|
|
//
|
|
// (|new_socket| inherits the same EventListener)
|
|
// |
|
|
// V
|
|
// OnDataAvailable()
|
|
// | (failure or Shutdown())
|
|
// V
|
|
// OnDisconnect()
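//
// A sketch of a typical EventListener implementation (illustrative only;
// "EchoListener" and its members are hypothetical names):
//
//   class EchoListener : public UnixSocket::EventListener {
//    public:
//     void OnNewIncomingConnection(
//         UnixSocket*, std::unique_ptr<UnixSocket> conn) override {
//       clients_.push_back(std::move(conn));
//     }
//     void OnConnect(UnixSocket*, bool connected) override {}
//     void OnDataAvailable(UnixSocket* self) override {
//       char buf[256];
//       size_t rsize = self->Receive(buf, sizeof(buf));
//       if (rsize > 0)
//         self->Send(buf, rsize);  // Echo back.
//     }
//     void OnDisconnect(UnixSocket*) override {}
//
//    private:
//     std::vector<std::unique_ptr<UnixSocket>> clients_;
//   };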
|
|
class UnixSocket {
|
|
public:
|
|
class EventListener {
|
|
public:
|
|
virtual ~EventListener();
|
|
|
|
// After Listen().
|
|
virtual void OnNewIncomingConnection(
|
|
UnixSocket* self,
|
|
std::unique_ptr<UnixSocket> new_connection);
|
|
|
|
// After Connect(), whether successful or not.
|
|
virtual void OnConnect(UnixSocket* self, bool connected);
|
|
|
|
// After a successful Connect() or OnNewIncomingConnection(). Either the
|
|
// other endpoint did disconnect or some other error happened.
|
|
virtual void OnDisconnect(UnixSocket* self);
|
|
|
|
    // Whenever there is data available to Receive(). Note that spurious FD
    // watch events are possible, so a Receive() issued right after
    // OnDataAvailable() may return 0 (just ignore those).
|
|
virtual void OnDataAvailable(UnixSocket* self);
|
|
};
|
|
|
|
enum class State {
|
|
kDisconnected = 0, // Failed connection, peer disconnection or Shutdown().
|
|
kConnecting, // Soon after Connect(), before it either succeeds or fails.
|
|
kConnected, // After a successful Connect().
|
|
kListening // After Listen(), until Shutdown().
|
|
};
|
|
|
|
// Creates a socket and starts listening. If SockFamily::kUnix and
|
|
  // |socket_name| starts with a '@', an abstract UNIX domain socket will be
|
|
// created instead of a filesystem-linked UNIX socket (Linux/Android only).
|
|
// If SockFamily::kInet, |socket_name| is host:port (e.g., "1.2.3.4:8000").
|
|
// If SockFamily::kInet6, |socket_name| is [host]:port (e.g., "[::1]:8000").
|
|
  // Returns nullptr if the socket creation or bind fails. If listening fails
  // (e.g. if another socket with the same name is already listening), the
|
|
// returned socket will have is_listening() == false and last_error() will
|
|
// contain the failure reason.
|
|
static std::unique_ptr<UnixSocket> Listen(const std::string& socket_name,
|
|
EventListener*,
|
|
TaskRunner*,
|
|
SockFamily,
|
|
SockType);
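
  // Illustrative calls to the Listen() overload above (socket names and the
  // |listener| / |task_runner| variables are hypothetical):
  //
  //   auto unix_srv = UnixSocket::Listen("@my_abstract_sock", &listener,
  //                                      task_runner, SockFamily::kUnix,
  //                                      SockType::kStream);
  //   auto tcp_srv = UnixSocket::Listen("127.0.0.1:9000", &listener,
  //                                     task_runner, SockFamily::kInet,
  //                                     SockType::kStream);
  //   if (tcp_srv && !tcp_srv->is_listening()) {
  //     // Something else is bound to the port; see tcp_srv->last_error().
  //   }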
|
|
|
|
// Attaches to a pre-existing socket. The socket must have been created in
|
|
// SOCK_STREAM mode and the caller must have called bind() on it.
|
|
static std::unique_ptr<UnixSocket> Listen(ScopedFile,
|
|
EventListener*,
|
|
TaskRunner*,
|
|
SockFamily,
|
|
SockType);
|
|
|
|
// Creates a Unix domain socket and connects to the listening endpoint.
|
|
  // Always returns an instance. EventListener::OnConnect(bool success) will
  // always be called, whether the connection succeeded or not.
|
|
static std::unique_ptr<UnixSocket> Connect(const std::string& socket_name,
|
|
EventListener*,
|
|
TaskRunner*,
|
|
SockFamily,
|
|
SockType);
|
|
|
|
// Constructs a UnixSocket using the given connected socket.
|
|
static std::unique_ptr<UnixSocket> AdoptConnected(ScopedFile,
|
|
EventListener*,
|
|
TaskRunner*,
|
|
SockFamily,
|
|
SockType);
|
|
|
|
UnixSocket(const UnixSocket&) = delete;
|
|
UnixSocket& operator=(const UnixSocket&) = delete;
|
|
// Cannot be easily moved because of tasks from the FileDescriptorWatch.
|
|
UnixSocket(UnixSocket&&) = delete;
|
|
UnixSocket& operator=(UnixSocket&&) = delete;
|
|
|
|
// This class gives the hard guarantee that no callback is called on the
|
|
// passed EventListener immediately after the object has been destroyed.
|
|
// Any queued callback will be silently dropped.
|
|
~UnixSocket();
|
|
|
|
// Shuts down the current connection, if any. If the socket was Listen()-ing,
|
|
  // stops listening. The socket goes back to the kDisconnected state, so it
  // can be reused with Listen() or Connect().
|
|
void Shutdown(bool notify);
|
|
|
|
  // Returns true if the message was queued, false if there was no space in the
|
|
// output buffer, in which case the client should retry or give up.
|
|
// If any other error happens the socket will be shutdown and
|
|
// EventListener::OnDisconnect() will be called.
|
|
// If the socket is not connected, Send() will just return false.
|
|
// Does not append a null string terminator to msg in any case.
|
|
bool Send(const void* msg, size_t len, const int* send_fds, size_t num_fds);
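
  // Illustrative use of the Send() overload above (|payload| and |fd_to_share|
  // are hypothetical):
  //
  //   if (!sock->Send(payload.data(), payload.size(), &fd_to_share, 1)) {
  //     // Either the socket is disconnected / errored (OnDisconnect() will
  //     // follow) or the output buffer is full and the caller should retry.
  //   }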
|
|
|
|
inline bool Send(const void* msg, size_t len, int send_fd = -1) {
|
|
if (send_fd != -1)
|
|
return Send(msg, len, &send_fd, 1);
|
|
return Send(msg, len, nullptr, 0);
|
|
}
|
|
|
|
inline bool Send(const std::string& msg) {
|
|
return Send(msg.c_str(), msg.size() + 1, -1);
|
|
}
|
|
|
|
  // Returns the number of bytes (<= |len|) written into |msg|, or 0 if there
  // is no data in the buffer to read or an error occurs (in which case an
  // EventListener::OnDisconnect() will follow).
  // If the ScopedFile pointer is not null and a FD is received, the received
  // FD is moved into it. If a FD is received but the ScopedFile pointer is
  // null, the FD will be automatically closed.
|
|
size_t Receive(void* msg, size_t len, ScopedFile*, size_t max_files = 1);
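
  // Illustrative use of the Receive() overload above (|sock| is hypothetical):
  //
  //   char buf[256];
  //   ScopedFile received_fd;
  //   size_t rsize = sock->Receive(buf, sizeof(buf), &received_fd);
  //   if (received_fd) {
  //     // The peer passed a file descriptor along with the payload.
  //   }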
|
|
|
|
inline size_t Receive(void* msg, size_t len) {
|
|
return Receive(msg, len, nullptr, 0);
|
|
}
|
|
|
|
// Only for tests. This is slower than Receive() as it requires a heap
|
|
// allocation and a copy for the std::string. Guarantees that the returned
|
|
// string is null terminated even if the underlying message sent by the peer
|
|
// is not.
|
|
std::string ReceiveString(size_t max_length = 1024);
|
|
|
|
bool is_connected() const { return state_ == State::kConnected; }
|
|
bool is_listening() const { return state_ == State::kListening; }
|
|
int fd() const { return sock_raw_.fd(); }
|
|
int last_error() const { return last_error_; }
|
|
|
|
// User ID of the peer, as returned by the kernel. If the client disconnects
|
|
// and the socket goes into the kDisconnected state, it retains the uid of
|
|
// the last peer.
|
|
uid_t peer_uid() const {
|
|
PERFETTO_DCHECK(!is_listening() && peer_uid_ != kInvalidUid);
|
|
ignore_result(kInvalidPid); // Silence warnings in amalgamated builds.
|
|
return peer_uid_;
|
|
}
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
// Process ID of the peer, as returned by the kernel. If the client
|
|
// disconnects and the socket goes into the kDisconnected state, it
|
|
// retains the pid of the last peer.
|
|
//
|
|
// This is only available on Linux / Android.
|
|
pid_t peer_pid() const {
|
|
PERFETTO_DCHECK(!is_listening() && peer_pid_ != kInvalidPid);
|
|
return peer_pid_;
|
|
}
|
|
#endif
|
|
|
|
// This makes the UnixSocket unusable.
|
|
UnixSocketRaw ReleaseSocket();
|
|
|
|
private:
|
|
UnixSocket(EventListener*, TaskRunner*, SockFamily, SockType);
|
|
UnixSocket(EventListener*,
|
|
TaskRunner*,
|
|
ScopedFile,
|
|
State,
|
|
SockFamily,
|
|
SockType);
|
|
|
|
// Called once by the corresponding public static factory methods.
|
|
void DoConnect(const std::string& socket_name);
|
|
void ReadPeerCredentials();
|
|
|
|
void OnEvent();
|
|
void NotifyConnectionState(bool success);
|
|
|
|
UnixSocketRaw sock_raw_;
|
|
State state_ = State::kDisconnected;
|
|
int last_error_ = 0;
|
|
uid_t peer_uid_ = kInvalidUid;
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
pid_t peer_pid_ = kInvalidPid;
|
|
#endif
|
|
EventListener* const event_listener_;
|
|
TaskRunner* const task_runner_;
|
|
WeakPtrFactory<UnixSocket> weak_ptr_factory_; // Keep last.
|
|
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_UNIX_SOCKET_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
|
|
#include <errno.h>
|
|
#include <fcntl.h>
|
|
#include <netdb.h>
|
|
#include <netinet/in.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
#include <sys/socket.h>
|
|
#include <sys/stat.h>
|
|
#include <sys/types.h>
|
|
#include <sys/un.h>
|
|
#include <unistd.h>
|
|
|
|
#include <algorithm>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#include <sys/ucred.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// The CMSG_* macros use NULL instead of nullptr.
|
|
#pragma GCC diagnostic push
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant"
|
|
#endif
|
|
|
|
namespace {
|
|
|
|
// MSG_NOSIGNAL is not supported on Mac OS X, but in that case the socket is
|
|
// created with SO_NOSIGPIPE (see the UnixSocketRaw constructor).
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
constexpr int kNoSigPipe = 0;
|
|
#else
|
|
constexpr int kNoSigPipe = MSG_NOSIGNAL;
|
|
#endif
|
|
|
|
// Android takes a size_t instead of socklen_t for the control buffer size.
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
using CBufLenType = size_t;
|
|
#else
|
|
using CBufLenType = socklen_t;
|
|
#endif
|
|
|
|
// A wrapper around variable-size sockaddr structs.
|
|
// This is solving the following problem: when calling connect() or bind(), the
|
|
// caller needs to take care to allocate the right struct (sockaddr_un for
|
|
// AF_UNIX, sockaddr_in for AF_INET). Those structs have different sizes and,
|
|
// more importantly, are bigger than the base struct sockaddr.
|
|
struct SockaddrAny {
|
|
SockaddrAny() : size() {}
|
|
SockaddrAny(const void* addr, socklen_t sz) : data(new char[sz]), size(sz) {
|
|
memcpy(data.get(), addr, static_cast<size_t>(size));
|
|
}
|
|
|
|
const struct sockaddr* addr() const {
|
|
return reinterpret_cast<const struct sockaddr*>(data.get());
|
|
}
|
|
|
|
std::unique_ptr<char[]> data;
|
|
socklen_t size;
|
|
};
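
// Illustrative use, mirroring what Bind() and Connect() further below do
// (|fd| and |name| are hypothetical):
//
//   SockaddrAny addr = MakeSockAddr(SockFamily::kUnix, name);
//   if (addr.size == 0)
//     return false;  // E.g., the socket name was too long.
//   connect(fd, addr.addr(), addr.size);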
|
|
|
|
inline int GetSockFamily(SockFamily family) {
|
|
switch (family) {
|
|
case SockFamily::kUnix:
|
|
return AF_UNIX;
|
|
case SockFamily::kInet:
|
|
return AF_INET;
|
|
case SockFamily::kInet6:
|
|
return AF_INET6;
|
|
}
|
|
PERFETTO_CHECK(false); // For GCC.
|
|
}
|
|
|
|
inline int GetSockType(SockType type) {
|
|
#ifdef SOCK_CLOEXEC
|
|
constexpr int kSockCloExec = SOCK_CLOEXEC;
|
|
#else
|
|
constexpr int kSockCloExec = 0;
|
|
#endif
|
|
switch (type) {
|
|
case SockType::kStream:
|
|
return SOCK_STREAM | kSockCloExec;
|
|
case SockType::kDgram:
|
|
return SOCK_DGRAM | kSockCloExec;
|
|
case SockType::kSeqPacket:
|
|
return SOCK_SEQPACKET | kSockCloExec;
|
|
}
|
|
PERFETTO_CHECK(false); // For GCC.
|
|
}
|
|
|
|
SockaddrAny MakeSockAddr(SockFamily family, const std::string& socket_name) {
|
|
switch (family) {
|
|
case SockFamily::kUnix: {
|
|
struct sockaddr_un saddr {};
|
|
const size_t name_len = socket_name.size();
|
|
if (name_len >= sizeof(saddr.sun_path)) {
|
|
errno = ENAMETOOLONG;
|
|
return SockaddrAny();
|
|
}
|
|
memcpy(saddr.sun_path, socket_name.data(), name_len);
|
|
if (saddr.sun_path[0] == '@')
|
|
saddr.sun_path[0] = '\0';
|
|
saddr.sun_family = AF_UNIX;
|
|
auto size = static_cast<socklen_t>(
|
|
__builtin_offsetof(sockaddr_un, sun_path) + name_len + 1);
|
|
PERFETTO_CHECK(static_cast<size_t>(size) <= sizeof(saddr));
|
|
return SockaddrAny(&saddr, size);
|
|
}
|
|
case SockFamily::kInet: {
|
|
auto parts = SplitString(socket_name, ":");
|
|
PERFETTO_CHECK(parts.size() == 2);
|
|
struct addrinfo* addr_info = nullptr;
|
|
struct addrinfo hints {};
|
|
hints.ai_family = AF_INET;
|
|
PERFETTO_CHECK(getaddrinfo(parts[0].c_str(), parts[1].c_str(), &hints,
|
|
&addr_info) == 0);
|
|
PERFETTO_CHECK(addr_info->ai_family == AF_INET);
|
|
SockaddrAny res(addr_info->ai_addr, addr_info->ai_addrlen);
|
|
freeaddrinfo(addr_info);
|
|
return res;
|
|
}
|
|
case SockFamily::kInet6: {
|
|
auto parts = SplitString(socket_name, "]");
|
|
PERFETTO_CHECK(parts.size() == 2);
|
|
auto address = SplitString(parts[0], "[");
|
|
PERFETTO_CHECK(address.size() == 1);
|
|
auto port = SplitString(parts[1], ":");
|
|
PERFETTO_CHECK(port.size() == 1);
|
|
struct addrinfo* addr_info = nullptr;
|
|
struct addrinfo hints {};
|
|
hints.ai_family = AF_INET6;
|
|
PERFETTO_CHECK(getaddrinfo(address[0].c_str(), port[0].c_str(), &hints,
|
|
&addr_info) == 0);
|
|
PERFETTO_CHECK(addr_info->ai_family == AF_INET6);
|
|
SockaddrAny res(addr_info->ai_addr, addr_info->ai_addrlen);
|
|
freeaddrinfo(addr_info);
|
|
return res;
|
|
}
|
|
}
|
|
PERFETTO_CHECK(false); // For GCC.
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// +-----------------------+
|
|
// | UnixSocketRaw methods |
|
|
// +-----------------------+
|
|
|
|
// static
|
|
void UnixSocketRaw::ShiftMsgHdr(size_t n, struct msghdr* msg) {
|
|
using LenType = decltype(msg->msg_iovlen); // Mac and Linux don't agree.
|
|
for (LenType i = 0; i < msg->msg_iovlen; ++i) {
|
|
struct iovec* vec = &msg->msg_iov[i];
|
|
if (n < vec->iov_len) {
|
|
// We sent a part of this iovec.
|
|
vec->iov_base = reinterpret_cast<char*>(vec->iov_base) + n;
|
|
vec->iov_len -= n;
|
|
msg->msg_iov = vec;
|
|
msg->msg_iovlen -= i;
|
|
return;
|
|
}
|
|
// We sent the whole iovec.
|
|
n -= vec->iov_len;
|
|
}
|
|
// We sent all the iovecs.
|
|
PERFETTO_CHECK(n == 0);
|
|
msg->msg_iovlen = 0;
|
|
msg->msg_iov = nullptr;
|
|
}
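
// Worked example (illustrative, hypothetical buffers): given two iovecs of 10
// bytes each and n == 15,
//
//   struct iovec iov[2] = {{buf_a, 10}, {buf_b, 10}};
//   struct msghdr mh{};
//   mh.msg_iov = iov;
//   mh.msg_iovlen = 2;
//   UnixSocketRaw::ShiftMsgHdr(15, &mh);
//
// leaves mh.msg_iov == &iov[1], mh.msg_iovlen == 1, iov[1].iov_base advanced
// by 5 bytes and iov[1].iov_len == 5, so the next sendmsg() resumes exactly
// where the partial send stopped.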
|
|
|
|
// static
|
|
UnixSocketRaw UnixSocketRaw::CreateMayFail(SockFamily family, SockType type) {
|
|
auto fd = ScopedFile(socket(GetSockFamily(family), GetSockType(type), 0));
|
|
if (!fd) {
|
|
return UnixSocketRaw();
|
|
}
|
|
return UnixSocketRaw(std::move(fd), family, type);
|
|
}
|
|
|
|
// static
|
|
std::pair<UnixSocketRaw, UnixSocketRaw> UnixSocketRaw::CreatePair(
|
|
SockFamily family,
|
|
SockType type) {
|
|
int fds[2];
|
|
if (socketpair(GetSockFamily(family), GetSockType(type), 0, fds) != 0)
|
|
return std::make_pair(UnixSocketRaw(), UnixSocketRaw());
|
|
|
|
return std::make_pair(UnixSocketRaw(ScopedFile(fds[0]), family, type),
|
|
UnixSocketRaw(ScopedFile(fds[1]), family, type));
|
|
}
|
|
|
|
UnixSocketRaw::UnixSocketRaw() = default;
|
|
|
|
UnixSocketRaw::UnixSocketRaw(SockFamily family, SockType type)
|
|
: UnixSocketRaw(
|
|
ScopedFile(socket(GetSockFamily(family), GetSockType(type), 0)),
|
|
family,
|
|
type) {}
|
|
|
|
UnixSocketRaw::UnixSocketRaw(ScopedFile fd, SockFamily family, SockType type)
|
|
: fd_(std::move(fd)), family_(family), type_(type) {
|
|
PERFETTO_CHECK(fd_);
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
const int no_sigpipe = 1;
|
|
setsockopt(*fd_, SOL_SOCKET, SO_NOSIGPIPE, &no_sigpipe, sizeof(no_sigpipe));
|
|
#endif
|
|
|
|
if (family == SockFamily::kInet || family == SockFamily::kInet6) {
|
|
int flag = 1;
|
|
PERFETTO_CHECK(
|
|
!setsockopt(*fd_, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(flag)));
|
|
}
|
|
|
|
// There is no reason why a socket should outlive the process in case of
|
|
  // exec() by default; this is just working around a broken unix design.
|
|
int fcntl_res = fcntl(*fd_, F_SETFD, FD_CLOEXEC);
|
|
PERFETTO_CHECK(fcntl_res == 0);
|
|
}
|
|
|
|
void UnixSocketRaw::SetBlocking(bool is_blocking) {
|
|
PERFETTO_DCHECK(fd_);
|
|
int flags = fcntl(*fd_, F_GETFL, 0);
|
|
if (!is_blocking) {
|
|
flags |= O_NONBLOCK;
|
|
} else {
|
|
flags &= ~static_cast<int>(O_NONBLOCK);
|
|
}
|
|
  int fcntl_res = fcntl(*fd_, F_SETFL, flags);
|
|
PERFETTO_CHECK(fcntl_res == 0);
|
|
}
|
|
|
|
void UnixSocketRaw::RetainOnExec() {
|
|
PERFETTO_DCHECK(fd_);
|
|
int flags = fcntl(*fd_, F_GETFD, 0);
|
|
flags &= ~static_cast<int>(FD_CLOEXEC);
|
|
  int fcntl_res = fcntl(*fd_, F_SETFD, flags);
|
|
PERFETTO_CHECK(fcntl_res == 0);
|
|
}
|
|
|
|
bool UnixSocketRaw::IsBlocking() const {
|
|
PERFETTO_DCHECK(fd_);
|
|
return (fcntl(*fd_, F_GETFL, 0) & O_NONBLOCK) == 0;
|
|
}
|
|
|
|
bool UnixSocketRaw::Bind(const std::string& socket_name) {
|
|
PERFETTO_DCHECK(fd_);
|
|
SockaddrAny addr = MakeSockAddr(family_, socket_name);
|
|
if (addr.size == 0)
|
|
return false;
|
|
|
|
if (bind(*fd_, addr.addr(), addr.size)) {
|
|
PERFETTO_DPLOG("bind(%s)", socket_name.c_str());
|
|
return false;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
bool UnixSocketRaw::Listen() {
|
|
PERFETTO_DCHECK(fd_);
|
|
PERFETTO_DCHECK(type_ == SockType::kStream || type_ == SockType::kSeqPacket);
|
|
return listen(*fd_, SOMAXCONN) == 0;
|
|
}
|
|
|
|
bool UnixSocketRaw::Connect(const std::string& socket_name) {
|
|
PERFETTO_DCHECK(fd_);
|
|
SockaddrAny addr = MakeSockAddr(family_, socket_name);
|
|
if (addr.size == 0)
|
|
return false;
|
|
|
|
int res = PERFETTO_EINTR(connect(*fd_, addr.addr(), addr.size));
|
|
if (res && errno != EINPROGRESS)
|
|
return false;
|
|
|
|
return true;
|
|
}
|
|
|
|
void UnixSocketRaw::Shutdown() {
|
|
shutdown(*fd_, SHUT_RDWR);
|
|
fd_.reset();
|
|
}
|
|
|
|
// For the interested reader, a Linux kernel dive verifies that this is not
// only a theoretical possibility: sock_stream_sendmsg, if
// sock_alloc_send_pskb returns NULL [1] (which it does when it gets
// interrupted [2]), returns early with the number of bytes already sent.
|
|
//
|
|
// [1]:
|
|
// https://elixir.bootlin.com/linux/v4.18.10/source/net/unix/af_unix.c#L1872
|
|
// [2]: https://elixir.bootlin.com/linux/v4.18.10/source/net/core/sock.c#L2101
|
|
ssize_t UnixSocketRaw::SendMsgAll(struct msghdr* msg) {
|
|
// This does not make sense on non-blocking sockets.
|
|
PERFETTO_DCHECK(fd_);
|
|
|
|
ssize_t total_sent = 0;
|
|
while (msg->msg_iov) {
|
|
ssize_t sent = PERFETTO_EINTR(sendmsg(*fd_, msg, kNoSigPipe));
|
|
if (sent <= 0) {
|
|
if (sent == -1 && IsAgain(errno))
|
|
return total_sent;
|
|
return sent;
|
|
}
|
|
total_sent += sent;
|
|
ShiftMsgHdr(static_cast<size_t>(sent), msg);
|
|
// Only send the ancillary data with the first sendmsg call.
|
|
msg->msg_control = nullptr;
|
|
msg->msg_controllen = 0;
|
|
}
|
|
return total_sent;
|
|
}
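
// Illustrative use of SendMsgAll() (hypothetical buffers; the socket must be
// blocking, as noted above): sending two scattered buffers in one logical
// write and letting the loop deal with partial sends.
//
//   struct iovec iov[2] = {{header, header_size}, {payload, payload_size}};
//   struct msghdr mh{};
//   mh.msg_iov = iov;
//   mh.msg_iovlen = 2;
//   ssize_t sent = sock.SendMsgAll(&mh);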
|
|
|
|
ssize_t UnixSocketRaw::Send(const void* msg,
|
|
size_t len,
|
|
const int* send_fds,
|
|
size_t num_fds) {
|
|
PERFETTO_DCHECK(fd_);
|
|
msghdr msg_hdr = {};
|
|
iovec iov = {const_cast<void*>(msg), len};
|
|
msg_hdr.msg_iov = &iov;
|
|
msg_hdr.msg_iovlen = 1;
|
|
alignas(cmsghdr) char control_buf[256];
|
|
|
|
if (num_fds > 0) {
|
|
const auto raw_ctl_data_sz = num_fds * sizeof(int);
|
|
const CBufLenType control_buf_len =
|
|
static_cast<CBufLenType>(CMSG_SPACE(raw_ctl_data_sz));
|
|
PERFETTO_CHECK(control_buf_len <= sizeof(control_buf));
|
|
memset(control_buf, 0, sizeof(control_buf));
|
|
msg_hdr.msg_control = control_buf;
|
|
msg_hdr.msg_controllen = control_buf_len; // used by CMSG_FIRSTHDR
|
|
struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg_hdr);
|
|
cmsg->cmsg_level = SOL_SOCKET;
|
|
cmsg->cmsg_type = SCM_RIGHTS;
|
|
cmsg->cmsg_len = static_cast<CBufLenType>(CMSG_LEN(raw_ctl_data_sz));
|
|
memcpy(CMSG_DATA(cmsg), send_fds, num_fds * sizeof(int));
|
|
// note: if we were to send multiple cmsghdr structures, then
|
|
// msg_hdr.msg_controllen would need to be adjusted, see "man 3 cmsg".
|
|
}
|
|
|
|
return SendMsgAll(&msg_hdr);
|
|
}
|
|
|
|
ssize_t UnixSocketRaw::Receive(void* msg,
|
|
size_t len,
|
|
ScopedFile* fd_vec,
|
|
size_t max_files) {
|
|
PERFETTO_DCHECK(fd_);
|
|
msghdr msg_hdr = {};
|
|
iovec iov = {msg, len};
|
|
msg_hdr.msg_iov = &iov;
|
|
msg_hdr.msg_iovlen = 1;
|
|
alignas(cmsghdr) char control_buf[256];
|
|
|
|
if (max_files > 0) {
|
|
msg_hdr.msg_control = control_buf;
|
|
msg_hdr.msg_controllen =
|
|
static_cast<CBufLenType>(CMSG_SPACE(max_files * sizeof(int)));
|
|
PERFETTO_CHECK(msg_hdr.msg_controllen <= sizeof(control_buf));
|
|
}
|
|
const ssize_t sz = PERFETTO_EINTR(recvmsg(*fd_, &msg_hdr, 0));
|
|
if (sz <= 0) {
|
|
return sz;
|
|
}
|
|
PERFETTO_CHECK(static_cast<size_t>(sz) <= len);
|
|
|
|
int* fds = nullptr;
|
|
uint32_t fds_len = 0;
|
|
|
|
if (max_files > 0) {
|
|
for (cmsghdr* cmsg = CMSG_FIRSTHDR(&msg_hdr); cmsg;
|
|
cmsg = CMSG_NXTHDR(&msg_hdr, cmsg)) {
|
|
const size_t payload_len = cmsg->cmsg_len - CMSG_LEN(0);
|
|
if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
|
|
PERFETTO_DCHECK(payload_len % sizeof(int) == 0u);
|
|
PERFETTO_CHECK(fds == nullptr);
|
|
fds = reinterpret_cast<int*>(CMSG_DATA(cmsg));
|
|
fds_len = static_cast<uint32_t>(payload_len / sizeof(int));
|
|
}
|
|
}
|
|
}
|
|
|
|
if (msg_hdr.msg_flags & MSG_TRUNC || msg_hdr.msg_flags & MSG_CTRUNC) {
|
|
for (size_t i = 0; fds && i < fds_len; ++i)
|
|
close(fds[i]);
|
|
errno = EMSGSIZE;
|
|
return -1;
|
|
}
|
|
|
|
for (size_t i = 0; fds && i < fds_len; ++i) {
|
|
if (i < max_files)
|
|
fd_vec[i].reset(fds[i]);
|
|
else
|
|
close(fds[i]);
|
|
}
|
|
|
|
return sz;
|
|
}
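
// Illustrative sketch of passing a file descriptor across a socket pair using
// the Send() / Receive() methods above (all names are hypothetical):
//
//   auto socks = UnixSocketRaw::CreatePair(SockFamily::kUnix,
//                                          SockType::kStream);
//   int fd_to_send = open("/dev/null", O_RDONLY);
//   socks.first.Send("x", 1, &fd_to_send, 1);
//   close(fd_to_send);  // The receiver gets its own reference.
//   char c;
//   ScopedFile received;
//   socks.second.Receive(&c, 1, &received, 1);
//   // |received| now owns a descriptor for the same open file.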
|
|
|
|
bool UnixSocketRaw::SetTxTimeout(uint32_t timeout_ms) {
|
|
PERFETTO_DCHECK(fd_);
|
|
struct timeval timeout {};
|
|
uint32_t timeout_sec = timeout_ms / 1000;
|
|
timeout.tv_sec = static_cast<decltype(timeout.tv_sec)>(timeout_sec);
|
|
timeout.tv_usec = static_cast<decltype(timeout.tv_usec)>(
|
|
(timeout_ms - (timeout_sec * 1000)) * 1000);
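  // E.g., timeout_ms == 1500 -> tv_sec == 1, tv_usec == 500000.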
|
|
|
|
return setsockopt(*fd_, SOL_SOCKET, SO_SNDTIMEO,
|
|
reinterpret_cast<const char*>(&timeout),
|
|
sizeof(timeout)) == 0;
|
|
}
|
|
|
|
bool UnixSocketRaw::SetRxTimeout(uint32_t timeout_ms) {
|
|
PERFETTO_DCHECK(fd_);
|
|
struct timeval timeout {};
|
|
uint32_t timeout_sec = timeout_ms / 1000;
|
|
timeout.tv_sec = static_cast<decltype(timeout.tv_sec)>(timeout_sec);
|
|
timeout.tv_usec = static_cast<decltype(timeout.tv_usec)>(
|
|
(timeout_ms - (timeout_sec * 1000)) * 1000);
|
|
|
|
return setsockopt(*fd_, SOL_SOCKET, SO_RCVTIMEO,
|
|
reinterpret_cast<const char*>(&timeout),
|
|
sizeof(timeout)) == 0;
|
|
}
|
|
|
|
#pragma GCC diagnostic pop
|
|
|
|
// +--------------------+
|
|
// | UnixSocket methods |
|
|
// +--------------------+
|
|
|
|
// TODO(primiano): Add ThreadChecker to methods of this class.
|
|
|
|
// static
|
|
std::unique_ptr<UnixSocket> UnixSocket::Listen(const std::string& socket_name,
|
|
EventListener* event_listener,
|
|
TaskRunner* task_runner,
|
|
SockFamily sock_family,
|
|
SockType sock_type) {
|
|
auto sock_raw = UnixSocketRaw::CreateMayFail(sock_family, sock_type);
|
|
if (!sock_raw || !sock_raw.Bind(socket_name))
|
|
return nullptr;
|
|
|
|
// Forward the call to the Listen() overload below.
|
|
return Listen(sock_raw.ReleaseFd(), event_listener, task_runner, sock_family,
|
|
sock_type);
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<UnixSocket> UnixSocket::Listen(ScopedFile fd,
|
|
EventListener* event_listener,
|
|
TaskRunner* task_runner,
|
|
SockFamily sock_family,
|
|
SockType sock_type) {
|
|
return std::unique_ptr<UnixSocket>(
|
|
new UnixSocket(event_listener, task_runner, std::move(fd),
|
|
State::kListening, sock_family, sock_type));
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<UnixSocket> UnixSocket::Connect(const std::string& socket_name,
|
|
EventListener* event_listener,
|
|
TaskRunner* task_runner,
|
|
SockFamily sock_family,
|
|
SockType sock_type) {
|
|
std::unique_ptr<UnixSocket> sock(
|
|
new UnixSocket(event_listener, task_runner, sock_family, sock_type));
|
|
sock->DoConnect(socket_name);
|
|
return sock;
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<UnixSocket> UnixSocket::AdoptConnected(
|
|
ScopedFile fd,
|
|
EventListener* event_listener,
|
|
TaskRunner* task_runner,
|
|
SockFamily sock_family,
|
|
SockType sock_type) {
|
|
return std::unique_ptr<UnixSocket>(
|
|
new UnixSocket(event_listener, task_runner, std::move(fd),
|
|
State::kConnected, sock_family, sock_type));
|
|
}
|
|
|
|
UnixSocket::UnixSocket(EventListener* event_listener,
|
|
TaskRunner* task_runner,
|
|
SockFamily sock_family,
|
|
SockType sock_type)
|
|
: UnixSocket(event_listener,
|
|
task_runner,
|
|
ScopedFile(),
|
|
State::kDisconnected,
|
|
sock_family,
|
|
sock_type) {}
|
|
|
|
UnixSocket::UnixSocket(EventListener* event_listener,
|
|
TaskRunner* task_runner,
|
|
ScopedFile adopt_fd,
|
|
State adopt_state,
|
|
SockFamily sock_family,
|
|
SockType sock_type)
|
|
: event_listener_(event_listener),
|
|
task_runner_(task_runner),
|
|
weak_ptr_factory_(this) {
|
|
state_ = State::kDisconnected;
|
|
if (adopt_state == State::kDisconnected) {
|
|
PERFETTO_DCHECK(!adopt_fd);
|
|
sock_raw_ = UnixSocketRaw::CreateMayFail(sock_family, sock_type);
|
|
if (!sock_raw_) {
|
|
last_error_ = errno;
|
|
return;
|
|
}
|
|
} else if (adopt_state == State::kConnected) {
|
|
PERFETTO_DCHECK(adopt_fd);
|
|
sock_raw_ = UnixSocketRaw(std::move(adopt_fd), sock_family, sock_type);
|
|
state_ = State::kConnected;
|
|
ReadPeerCredentials();
|
|
} else if (adopt_state == State::kListening) {
|
|
// We get here from Listen().
|
|
|
|
// |adopt_fd| might genuinely be invalid if the bind() failed.
|
|
if (!adopt_fd) {
|
|
last_error_ = errno;
|
|
return;
|
|
}
|
|
|
|
sock_raw_ = UnixSocketRaw(std::move(adopt_fd), sock_family, sock_type);
|
|
if (!sock_raw_.Listen()) {
|
|
last_error_ = errno;
|
|
PERFETTO_DPLOG("listen()");
|
|
return;
|
|
}
|
|
state_ = State::kListening;
|
|
} else {
|
|
PERFETTO_FATAL("Unexpected adopt_state"); // Unfeasible.
|
|
}
|
|
|
|
PERFETTO_CHECK(sock_raw_);
|
|
last_error_ = 0;
|
|
|
|
sock_raw_.SetBlocking(false);
|
|
|
|
WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();
|
|
|
|
task_runner_->AddFileDescriptorWatch(sock_raw_.fd(), [weak_ptr] {
|
|
if (weak_ptr)
|
|
weak_ptr->OnEvent();
|
|
});
|
|
}
|
|
|
|
UnixSocket::~UnixSocket() {
|
|
// The implicit dtor of |weak_ptr_factory_| will no-op pending callbacks.
|
|
Shutdown(true);
|
|
}
|
|
|
|
UnixSocketRaw UnixSocket::ReleaseSocket() {
|
|
// This will invalidate any pending calls to OnEvent.
|
|
state_ = State::kDisconnected;
|
|
if (sock_raw_)
|
|
task_runner_->RemoveFileDescriptorWatch(sock_raw_.fd());
|
|
|
|
return std::move(sock_raw_);
|
|
}
|
|
|
|
// Called only by the Connect() static constructor.
|
|
void UnixSocket::DoConnect(const std::string& socket_name) {
|
|
PERFETTO_DCHECK(state_ == State::kDisconnected);
|
|
|
|
// This is the only thing that can gracefully fail in the ctor.
|
|
if (!sock_raw_)
|
|
return NotifyConnectionState(false);
|
|
|
|
if (!sock_raw_.Connect(socket_name)) {
|
|
last_error_ = errno;
|
|
return NotifyConnectionState(false);
|
|
}
|
|
|
|
// At this point either connect() succeeded or started asynchronously
|
|
// (errno = EINPROGRESS).
|
|
last_error_ = 0;
|
|
state_ = State::kConnecting;
|
|
|
|
// Even if the socket is non-blocking, connecting to a UNIX socket can be
|
|
// acknowledged straight away rather than returning EINPROGRESS.
|
|
// The decision here is to deal with the two cases uniformly, at the cost of
|
|
// delaying the straight-away-connect() case by one task, to avoid depending
|
|
  // on implementation details of UNIX sockets on the various OSes.
|
|
// Posting the OnEvent() below emulates a wakeup of the FD watch. OnEvent(),
|
|
// which knows how to deal with spurious wakeups, will poll the SO_ERROR and
|
|
// evolve, if necessary, the state into either kConnected or kDisconnected.
|
|
WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask([weak_ptr] {
|
|
if (weak_ptr)
|
|
weak_ptr->OnEvent();
|
|
});
|
|
}
|
|
|
|
void UnixSocket::ReadPeerCredentials() {
|
|
// Peer credentials are supported only on AF_UNIX sockets.
|
|
if (sock_raw_.family() != SockFamily::kUnix)
|
|
return;
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
struct ucred user_cred;
|
|
socklen_t len = sizeof(user_cred);
|
|
int fd = sock_raw_.fd();
|
|
int res = getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &user_cred, &len);
|
|
PERFETTO_CHECK(res == 0);
|
|
peer_uid_ = user_cred.uid;
|
|
peer_pid_ = user_cred.pid;
|
|
#else
|
|
struct xucred user_cred;
|
|
socklen_t len = sizeof(user_cred);
|
|
int res = getsockopt(sock_raw_.fd(), 0, LOCAL_PEERCRED, &user_cred, &len);
|
|
PERFETTO_CHECK(res == 0 && user_cred.cr_version == XUCRED_VERSION);
|
|
peer_uid_ = static_cast<uid_t>(user_cred.cr_uid);
|
|
// There is no pid in the LOCAL_PEERCREDS for MacOS / FreeBSD.
|
|
#endif
|
|
}
|
|
|
|
void UnixSocket::OnEvent() {
|
|
if (state_ == State::kDisconnected)
|
|
return; // Some spurious event, typically queued just before Shutdown().
|
|
|
|
if (state_ == State::kConnected)
|
|
return event_listener_->OnDataAvailable(this);
|
|
|
|
if (state_ == State::kConnecting) {
|
|
PERFETTO_DCHECK(sock_raw_);
|
|
int sock_err = EINVAL;
|
|
socklen_t err_len = sizeof(sock_err);
|
|
int res =
|
|
getsockopt(sock_raw_.fd(), SOL_SOCKET, SO_ERROR, &sock_err, &err_len);
|
|
|
|
if (res == 0 && sock_err == EINPROGRESS)
|
|
return; // Not connected yet, just a spurious FD watch wakeup.
|
|
if (res == 0 && sock_err == 0) {
|
|
ReadPeerCredentials();
|
|
state_ = State::kConnected;
|
|
return event_listener_->OnConnect(this, true /* connected */);
|
|
}
|
|
PERFETTO_DLOG("Connection error: %s", strerror(sock_err));
|
|
last_error_ = sock_err;
|
|
Shutdown(false);
|
|
return event_listener_->OnConnect(this, false /* connected */);
|
|
}
|
|
|
|
// New incoming connection.
|
|
if (state_ == State::kListening) {
|
|
// There could be more than one incoming connection behind each FD watch
|
|
// notification. Drain'em all.
|
|
for (;;) {
|
|
struct sockaddr_in cli_addr {};
|
|
socklen_t size = sizeof(cli_addr);
|
|
ScopedFile new_fd(PERFETTO_EINTR(accept(
|
|
sock_raw_.fd(), reinterpret_cast<sockaddr*>(&cli_addr), &size)));
|
|
if (!new_fd)
|
|
return;
|
|
std::unique_ptr<UnixSocket> new_sock(new UnixSocket(
|
|
event_listener_, task_runner_, std::move(new_fd), State::kConnected,
|
|
sock_raw_.family(), sock_raw_.type()));
|
|
event_listener_->OnNewIncomingConnection(this, std::move(new_sock));
|
|
}
|
|
}
|
|
}
|
|
|
|
bool UnixSocket::Send(const void* msg,
|
|
size_t len,
|
|
const int* send_fds,
|
|
size_t num_fds) {
|
|
if (state_ != State::kConnected) {
|
|
errno = last_error_ = ENOTCONN;
|
|
return false;
|
|
}
|
|
|
|
sock_raw_.SetBlocking(true);
|
|
const ssize_t sz = sock_raw_.Send(msg, len, send_fds, num_fds);
|
|
int saved_errno = errno;
|
|
sock_raw_.SetBlocking(false);
|
|
|
|
if (sz == static_cast<ssize_t>(len)) {
|
|
last_error_ = 0;
|
|
return true;
|
|
}
|
|
|
|
// If sendmsg() succeeds but the returned size is < |len| it means that the
|
|
// endpoint disconnected in the middle of the read, and we managed to send
|
|
// only a portion of the buffer. In this case we should just give up.
|
|
|
|
if (sz < 0 && (saved_errno == EAGAIN || saved_errno == EWOULDBLOCK)) {
|
|
// A genuine out-of-buffer. The client should retry or give up.
|
|
    // Man pages specify that EAGAIN and EWOULDBLOCK have the same semantics here
|
|
// and clients should check for both.
|
|
last_error_ = EAGAIN;
|
|
return false;
|
|
}
|
|
|
|
// Either the other endpoint disconnected (ECONNRESET) or some other error
|
|
// happened.
|
|
last_error_ = saved_errno;
|
|
PERFETTO_DPLOG("sendmsg() failed");
|
|
Shutdown(true);
|
|
return false;
|
|
}
|
|
|
|
void UnixSocket::Shutdown(bool notify) {
|
|
WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();
|
|
if (notify) {
|
|
if (state_ == State::kConnected) {
|
|
task_runner_->PostTask([weak_ptr] {
|
|
if (weak_ptr)
|
|
weak_ptr->event_listener_->OnDisconnect(weak_ptr.get());
|
|
});
|
|
} else if (state_ == State::kConnecting) {
|
|
task_runner_->PostTask([weak_ptr] {
|
|
if (weak_ptr)
|
|
weak_ptr->event_listener_->OnConnect(weak_ptr.get(), false);
|
|
});
|
|
}
|
|
}
|
|
|
|
if (sock_raw_) {
|
|
task_runner_->RemoveFileDescriptorWatch(sock_raw_.fd());
|
|
sock_raw_.Shutdown();
|
|
}
|
|
state_ = State::kDisconnected;
|
|
}
|
|
|
|
size_t UnixSocket::Receive(void* msg,
|
|
size_t len,
|
|
ScopedFile* fd_vec,
|
|
size_t max_files) {
|
|
if (state_ != State::kConnected) {
|
|
last_error_ = ENOTCONN;
|
|
return 0;
|
|
}
|
|
|
|
const ssize_t sz = sock_raw_.Receive(msg, len, fd_vec, max_files);
|
|
if (sz < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
|
|
last_error_ = EAGAIN;
|
|
return 0;
|
|
}
|
|
if (sz <= 0) {
|
|
last_error_ = errno;
|
|
Shutdown(true);
|
|
return 0;
|
|
}
|
|
PERFETTO_CHECK(static_cast<size_t>(sz) <= len);
|
|
return static_cast<size_t>(sz);
|
|
}
|
|
|
|
std::string UnixSocket::ReceiveString(size_t max_length) {
|
|
std::unique_ptr<char[]> buf(new char[max_length + 1]);
|
|
size_t rsize = Receive(buf.get(), max_length);
|
|
PERFETTO_CHECK(static_cast<size_t>(rsize) <= max_length);
|
|
buf[static_cast<size_t>(rsize)] = '\0';
|
|
return std::string(buf.get());
|
|
}
|
|
|
|
void UnixSocket::NotifyConnectionState(bool success) {
|
|
if (!success)
|
|
Shutdown(false);
|
|
|
|
WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask([weak_ptr, success] {
|
|
if (weak_ptr)
|
|
weak_ptr->event_listener_->OnConnect(weak_ptr.get(), success);
|
|
});
|
|
}
|
|
|
|
UnixSocket::EventListener::~EventListener() {}
|
|
void UnixSocket::EventListener::OnNewIncomingConnection(
|
|
UnixSocket*,
|
|
std::unique_ptr<UnixSocket>) {}
|
|
void UnixSocket::EventListener::OnConnect(UnixSocket*, bool) {}
|
|
void UnixSocket::EventListener::OnDisconnect(UnixSocket*) {}
|
|
void UnixSocket::EventListener::OnDataAvailable(UnixSocket*) {}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/ipc/client_impl.cc
|
|
// gen_amalgamated begin header: src/ipc/client_impl.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_IPC_CLIENT_IMPL_H_
|
|
#define SRC_IPC_CLIENT_IMPL_H_
|
|
|
|
#include <list>
|
|
#include <map>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
|
|
// gen_amalgamated expanded: #include "src/ipc/buffered_frame_deserializer.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace protos {
|
|
namespace gen {
|
|
class IPCFrame_BindServiceReply;
|
|
class IPCFrame_InvokeMethodReply;
|
|
} // namespace gen
|
|
} // namespace protos
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
namespace ipc {
|
|
|
|
class ServiceDescriptor;
|
|
|
|
class ClientImpl : public Client, public base::UnixSocket::EventListener {
|
|
public:
|
|
ClientImpl(const char* socket_name, bool socket_retry, base::TaskRunner*);
|
|
~ClientImpl() override;
|
|
|
|
// Client implementation.
|
|
void BindService(base::WeakPtr<ServiceProxy>) override;
|
|
void UnbindService(ServiceID) override;
|
|
base::ScopedFile TakeReceivedFD() override;
|
|
|
|
// base::UnixSocket::EventListener implementation.
|
|
void OnConnect(base::UnixSocket*, bool connected) override;
|
|
void OnDisconnect(base::UnixSocket*) override;
|
|
void OnDataAvailable(base::UnixSocket*) override;
|
|
|
|
RequestID BeginInvoke(ServiceID,
|
|
const std::string& method_name,
|
|
MethodID remote_method_id,
|
|
const ProtoMessage& method_args,
|
|
bool drop_reply,
|
|
base::WeakPtr<ServiceProxy>,
|
|
int fd = -1);
|
|
|
|
private:
|
|
struct QueuedRequest {
|
|
QueuedRequest();
|
|
int type = 0; // From Frame::msg_case(), see wire_protocol.proto.
|
|
RequestID request_id = 0;
|
|
base::WeakPtr<ServiceProxy> service_proxy;
|
|
|
|
// Only for type == kMsgInvokeMethod.
|
|
std::string method_name;
|
|
};
|
|
|
|
ClientImpl(const ClientImpl&) = delete;
|
|
ClientImpl& operator=(const ClientImpl&) = delete;
|
|
|
|
void TryConnect();
|
|
bool SendFrame(const Frame&, int fd = -1);
|
|
void OnFrameReceived(const Frame&);
|
|
void OnBindServiceReply(QueuedRequest,
|
|
const protos::gen::IPCFrame_BindServiceReply&);
|
|
void OnInvokeMethodReply(QueuedRequest,
|
|
const protos::gen::IPCFrame_InvokeMethodReply&);
|
|
|
|
bool invoking_method_reply_ = false;
|
|
const char* socket_name_ = nullptr;
|
|
bool socket_retry_ = false;
|
|
uint32_t socket_backoff_ms_ = 0;
|
|
std::unique_ptr<base::UnixSocket> sock_;
|
|
base::TaskRunner* const task_runner_;
|
|
RequestID last_request_id_ = 0;
|
|
BufferedFrameDeserializer frame_deserializer_;
|
|
base::ScopedFile received_fd_;
|
|
std::map<RequestID, QueuedRequest> queued_requests_;
|
|
std::map<ServiceID, base::WeakPtr<ServiceProxy>> service_bindings_;
|
|
|
|
// Queue of calls to BindService() that happened before the socket connected.
|
|
std::list<base::WeakPtr<ServiceProxy>> queued_bindings_;
|
|
|
|
base::WeakPtrFactory<Client> weak_ptr_factory_; // Keep last.
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_IPC_CLIENT_IMPL_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/ipc/client_impl.h"
|
|
|
|
#include <fcntl.h>
|
|
#include <inttypes.h>
|
|
#include <unistd.h>
|
|
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
|
|
|
|
// TODO(primiano): Add ThreadChecker everywhere.
|
|
|
|
// TODO(primiano): Add timeouts.
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// static
|
|
std::unique_ptr<Client> Client::CreateInstance(const char* socket_name,
|
|
bool socket_retry,
|
|
base::TaskRunner* task_runner) {
|
|
std::unique_ptr<Client> client(
|
|
new ClientImpl(socket_name, socket_retry, task_runner));
|
|
return client;
|
|
}
|
|
|
|
ClientImpl::ClientImpl(const char* socket_name,
|
|
bool socket_retry,
|
|
base::TaskRunner* task_runner)
|
|
: socket_name_(socket_name),
|
|
socket_retry_(socket_retry),
|
|
task_runner_(task_runner),
|
|
weak_ptr_factory_(this) {
|
|
TryConnect();
|
|
}
|
|
|
|
ClientImpl::~ClientImpl() {
|
|
// Ensure we are not destroyed in the middle of invoking a reply.
|
|
PERFETTO_DCHECK(!invoking_method_reply_);
|
|
OnDisconnect(
|
|
nullptr); // The base::UnixSocket* ptr is not used in OnDisconnect().
|
|
}
|
|
|
|
void ClientImpl::TryConnect() {
|
|
sock_ = base::UnixSocket::Connect(socket_name_, this, task_runner_,
|
|
base::SockFamily::kUnix,
|
|
base::SockType::kStream);
|
|
}
|
|
|
|
void ClientImpl::BindService(base::WeakPtr<ServiceProxy> service_proxy) {
|
|
if (!service_proxy)
|
|
return;
|
|
if (!sock_->is_connected()) {
|
|
queued_bindings_.emplace_back(service_proxy);
|
|
return;
|
|
}
|
|
RequestID request_id = ++last_request_id_;
|
|
Frame frame;
|
|
frame.set_request_id(request_id);
|
|
Frame::BindService* req = frame.mutable_msg_bind_service();
|
|
const char* const service_name = service_proxy->GetDescriptor().service_name;
|
|
req->set_service_name(service_name);
|
|
if (!SendFrame(frame)) {
|
|
PERFETTO_DLOG("BindService(%s) failed", service_name);
|
|
return service_proxy->OnConnect(false /* success */);
|
|
}
|
|
QueuedRequest qr;
|
|
qr.type = Frame::kMsgBindServiceFieldNumber;
|
|
qr.request_id = request_id;
|
|
qr.service_proxy = service_proxy;
|
|
queued_requests_.emplace(request_id, std::move(qr));
|
|
}
|
|
|
|
void ClientImpl::UnbindService(ServiceID service_id) {
|
|
service_bindings_.erase(service_id);
|
|
}
|
|
|
|
RequestID ClientImpl::BeginInvoke(ServiceID service_id,
|
|
const std::string& method_name,
|
|
MethodID remote_method_id,
|
|
const ProtoMessage& method_args,
|
|
bool drop_reply,
|
|
base::WeakPtr<ServiceProxy> service_proxy,
|
|
int fd) {
|
|
RequestID request_id = ++last_request_id_;
|
|
Frame frame;
|
|
frame.set_request_id(request_id);
|
|
Frame::InvokeMethod* req = frame.mutable_msg_invoke_method();
|
|
req->set_service_id(service_id);
|
|
req->set_method_id(remote_method_id);
|
|
req->set_drop_reply(drop_reply);
|
|
req->set_args_proto(method_args.SerializeAsString());
|
|
if (!SendFrame(frame, fd)) {
|
|
PERFETTO_DLOG("BeginInvoke() failed while sending the frame");
|
|
return 0;
|
|
}
|
|
if (drop_reply)
|
|
return 0;
|
|
QueuedRequest qr;
|
|
qr.type = Frame::kMsgInvokeMethodFieldNumber;
|
|
qr.request_id = request_id;
|
|
qr.method_name = method_name;
|
|
qr.service_proxy = std::move(service_proxy);
|
|
queued_requests_.emplace(request_id, std::move(qr));
|
|
return request_id;
|
|
}
|
|
|
|
bool ClientImpl::SendFrame(const Frame& frame, int fd) {
|
|
// Serialize the frame into protobuf, add the size header, and send it.
|
|
std::string buf = BufferedFrameDeserializer::Serialize(frame);
|
|
|
|
// TODO(primiano): this should do non-blocking I/O. But then what if the
|
|
// socket buffer is full? We might want to either drop the request or throttle
|
|
// the send and PostTask the reply later? Right now we are making Send()
|
|
  // blocking as a workaround. Propagate backpressure to the caller instead.
|
|
bool res = sock_->Send(buf.data(), buf.size(), fd);
|
|
PERFETTO_CHECK(res || !sock_->is_connected());
|
|
return res;
|
|
}
|
|
|
|
void ClientImpl::OnConnect(base::UnixSocket*, bool connected) {
|
|
if (!connected && socket_retry_) {
|
|
socket_backoff_ms_ =
|
|
(socket_backoff_ms_ < 10000) ? socket_backoff_ms_ + 1000 : 30000;
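    // With the constants above, the retry delay grows as 1s, 2s, ..., 10s and
    // then stays at 30s for every subsequent failed attempt.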
|
|
PERFETTO_DLOG(
|
|
"Connection to traced's UNIX socket failed, retrying in %u seconds",
|
|
socket_backoff_ms_ / 1000);
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostDelayedTask(
|
|
[weak_this] {
|
|
if (weak_this)
|
|
static_cast<ClientImpl&>(*weak_this).TryConnect();
|
|
},
|
|
socket_backoff_ms_);
|
|
return;
|
|
}
|
|
|
|
  // Drain the BindService() calls that were queued before establishing the
|
|
// connection with the host.
|
|
for (base::WeakPtr<ServiceProxy>& service_proxy : queued_bindings_) {
|
|
if (connected) {
|
|
BindService(service_proxy);
|
|
} else if (service_proxy) {
|
|
service_proxy->OnConnect(false /* success */);
|
|
}
|
|
}
|
|
queued_bindings_.clear();
|
|
}
|
|
|
|
void ClientImpl::OnDisconnect(base::UnixSocket*) {
|
|
for (auto it : service_bindings_) {
|
|
base::WeakPtr<ServiceProxy>& service_proxy = it.second;
|
|
task_runner_->PostTask([service_proxy] {
|
|
if (service_proxy)
|
|
service_proxy->OnDisconnect();
|
|
});
|
|
}
|
|
service_bindings_.clear();
|
|
queued_bindings_.clear();
|
|
}
|
|
|
|
void ClientImpl::OnDataAvailable(base::UnixSocket*) {
|
|
size_t rsize;
|
|
do {
|
|
auto buf = frame_deserializer_.BeginReceive();
|
|
base::ScopedFile fd;
|
|
rsize = sock_->Receive(buf.data, buf.size, &fd);
|
|
if (fd) {
|
|
PERFETTO_DCHECK(!received_fd_);
|
|
int res = fcntl(*fd, F_SETFD, FD_CLOEXEC);
|
|
PERFETTO_DCHECK(res == 0);
|
|
received_fd_ = std::move(fd);
|
|
}
|
|
if (!frame_deserializer_.EndReceive(rsize)) {
|
|
// The endpoint tried to send a frame that is way too large.
|
|
return sock_->Shutdown(true); // In turn will trigger an OnDisconnect().
|
|
// TODO(fmayer): check this.
|
|
}
|
|
} while (rsize > 0);
|
|
|
|
while (std::unique_ptr<Frame> frame = frame_deserializer_.PopNextFrame())
|
|
OnFrameReceived(*frame);
|
|
}
|
|
|
|
void ClientImpl::OnFrameReceived(const Frame& frame) {
|
|
auto queued_requests_it = queued_requests_.find(frame.request_id());
|
|
if (queued_requests_it == queued_requests_.end()) {
|
|
PERFETTO_DLOG("OnFrameReceived(): got invalid request_id=%" PRIu64,
|
|
static_cast<uint64_t>(frame.request_id()));
|
|
return;
|
|
}
|
|
QueuedRequest req = std::move(queued_requests_it->second);
|
|
queued_requests_.erase(queued_requests_it);
|
|
|
|
if (req.type == Frame::kMsgBindServiceFieldNumber &&
|
|
frame.has_msg_bind_service_reply()) {
|
|
return OnBindServiceReply(std::move(req), frame.msg_bind_service_reply());
|
|
}
|
|
if (req.type == Frame::kMsgInvokeMethodFieldNumber &&
|
|
frame.has_msg_invoke_method_reply()) {
|
|
return OnInvokeMethodReply(std::move(req), frame.msg_invoke_method_reply());
|
|
}
|
|
if (frame.has_msg_request_error()) {
|
|
PERFETTO_DLOG("Host error: %s", frame.msg_request_error().error().c_str());
|
|
return;
|
|
}
|
|
|
|
PERFETTO_DLOG(
|
|
"OnFrameReceived() request type=%d, received unknown frame in reply to "
|
|
"request_id=%" PRIu64,
|
|
req.type, static_cast<uint64_t>(frame.request_id()));
|
|
}
|
|
|
|
void ClientImpl::OnBindServiceReply(QueuedRequest req,
|
|
const Frame::BindServiceReply& reply) {
|
|
base::WeakPtr<ServiceProxy>& service_proxy = req.service_proxy;
|
|
if (!service_proxy)
|
|
return;
|
|
const char* svc_name = service_proxy->GetDescriptor().service_name;
|
|
if (!reply.success()) {
|
|
PERFETTO_DLOG("BindService(): unknown service_name=\"%s\"", svc_name);
|
|
return service_proxy->OnConnect(false /* success */);
|
|
}
|
|
|
|
auto prev_service = service_bindings_.find(reply.service_id());
|
|
if (prev_service != service_bindings_.end() && prev_service->second.get()) {
|
|
PERFETTO_DLOG(
|
|
"BindService(): Trying to bind service \"%s\" but another service "
|
|
"named \"%s\" is already bound with the same ID.",
|
|
svc_name, prev_service->second->GetDescriptor().service_name);
|
|
return service_proxy->OnConnect(false /* success */);
|
|
}
|
|
|
|
// Build the method [name] -> [remote_id] map.
|
|
std::map<std::string, MethodID> methods;
|
|
for (const auto& method : reply.methods()) {
|
|
if (method.name().empty() || method.id() <= 0) {
|
|
PERFETTO_DLOG("OnBindServiceReply(): invalid method \"%s\" -> %" PRIu64,
|
|
method.name().c_str(), static_cast<uint64_t>(method.id()));
|
|
continue;
|
|
}
|
|
methods[method.name()] = method.id();
|
|
}
|
|
service_proxy->InitializeBinding(weak_ptr_factory_.GetWeakPtr(),
|
|
reply.service_id(), std::move(methods));
|
|
service_bindings_[reply.service_id()] = service_proxy;
|
|
service_proxy->OnConnect(true /* success */);
|
|
}
|
|
|
|
void ClientImpl::OnInvokeMethodReply(QueuedRequest req,
|
|
const Frame::InvokeMethodReply& reply) {
|
|
base::WeakPtr<ServiceProxy> service_proxy = req.service_proxy;
|
|
if (!service_proxy)
|
|
return;
|
|
std::unique_ptr<ProtoMessage> decoded_reply;
|
|
if (reply.success()) {
|
|
// If this becomes a hotspot, optimize by maintaining a dedicated hashtable.
|
|
for (const auto& method : service_proxy->GetDescriptor().methods) {
|
|
if (req.method_name == method.name) {
|
|
decoded_reply = method.reply_proto_decoder(reply.reply_proto());
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
const RequestID request_id = req.request_id;
|
|
invoking_method_reply_ = true;
|
|
service_proxy->EndInvoke(request_id, std::move(decoded_reply),
|
|
reply.has_more());
|
|
invoking_method_reply_ = false;
|
|
|
|
  // If this is a streaming method and further replies are expected, put |req|
  // (with its callback) back into the set of active requests.
|
|
if (reply.has_more())
|
|
queued_requests_.emplace(request_id, std::move(req));
|
|
}
|
|
|
|
ClientImpl::QueuedRequest::QueuedRequest() = default;
|
|
|
|
base::ScopedFile ClientImpl::TakeReceivedFD() {
|
|
return std::move(received_fd_);
|
|
}
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/ipc/service_proxy.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
|
|
// gen_amalgamated expanded: #include "src/ipc/client_impl.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
ServiceProxy::ServiceProxy(EventListener* event_listener)
|
|
: event_listener_(event_listener), weak_ptr_factory_(this) {}
|
|
|
|
ServiceProxy::~ServiceProxy() {
|
|
if (client_ && connected())
|
|
client_->UnbindService(service_id_);
|
|
}
|
|
|
|
void ServiceProxy::InitializeBinding(
|
|
base::WeakPtr<Client> client,
|
|
ServiceID service_id,
|
|
std::map<std::string, MethodID> remote_method_ids) {
|
|
client_ = std::move(client);
|
|
service_id_ = service_id;
|
|
remote_method_ids_ = std::move(remote_method_ids);
|
|
}
|
|
|
|
void ServiceProxy::BeginInvoke(const std::string& method_name,
|
|
const ProtoMessage& request,
|
|
DeferredBase reply,
|
|
int fd) {
|
|
// |reply| will auto-resolve if it gets out of scope early.
|
|
if (!connected()) {
|
|
PERFETTO_DFATAL("Not connected.");
|
|
return;
|
|
}
|
|
if (!client_)
|
|
return; // The Client object has been destroyed in the meantime.
|
|
|
|
auto remote_method_it = remote_method_ids_.find(method_name);
|
|
RequestID request_id = 0;
|
|
const bool drop_reply = !reply.IsBound();
|
|
if (remote_method_it != remote_method_ids_.end()) {
|
|
request_id =
|
|
static_cast<ClientImpl*>(client_.get())
|
|
->BeginInvoke(service_id_, method_name, remote_method_it->second,
|
|
request, drop_reply, weak_ptr_factory_.GetWeakPtr(),
|
|
fd);
|
|
} else {
|
|
PERFETTO_DLOG("Cannot find method \"%s\" on the host", method_name.c_str());
|
|
}
|
|
|
|
// When passing |drop_reply| == true, the returned |request_id| should be 0.
|
|
PERFETTO_DCHECK(!drop_reply || !request_id);
|
|
|
|
if (!request_id)
|
|
return;
|
|
PERFETTO_DCHECK(pending_callbacks_.count(request_id) == 0);
|
|
pending_callbacks_.emplace(request_id, std::move(reply));
|
|
}
|
|
|
|
void ServiceProxy::EndInvoke(RequestID request_id,
|
|
std::unique_ptr<ProtoMessage> result,
|
|
bool has_more) {
|
|
auto callback_it = pending_callbacks_.find(request_id);
|
|
if (callback_it == pending_callbacks_.end()) {
|
|
// Either we are getting a reply for a method we never invoked, or we are
|
|
// getting a reply to a method marked drop_reply (that has been invoked
|
|
    // without binding any callback in the Deferred response object).
|
|
PERFETTO_DFATAL("Unexpected reply received.");
|
|
return;
|
|
}
|
|
DeferredBase& reply_callback = callback_it->second;
|
|
AsyncResult<ProtoMessage> reply(std::move(result), has_more);
|
|
reply_callback.Resolve(std::move(reply));
|
|
if (!has_more)
|
|
pending_callbacks_.erase(callback_it);
|
|
}
|
|
|
|
void ServiceProxy::OnConnect(bool success) {
|
|
if (success) {
|
|
PERFETTO_DCHECK(service_id_);
|
|
return event_listener_->OnConnect();
|
|
}
|
|
return event_listener_->OnDisconnect();
|
|
}
|
|
|
|
void ServiceProxy::OnDisconnect() {
|
|
pending_callbacks_.clear(); // Will Reject() all the pending callbacks.
|
|
event_listener_->OnDisconnect();
|
|
}
|
|
|
|
base::WeakPtr<ServiceProxy> ServiceProxy::GetWeakPtr() const {
|
|
return weak_ptr_factory_.GetWeakPtr();
|
|
}
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/default_socket.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/ipc/default_socket.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_IPC_DEFAULT_SOCKET_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_IPC_DEFAULT_SOCKET_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
|
|
PERFETTO_EXPORT const char* GetConsumerSocket();
|
|
PERFETTO_EXPORT const char* GetProducerSocket();
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_IPC_DEFAULT_SOCKET_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/default_socket.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
|
|
#include <stdlib.h>
|
|
|
|
namespace perfetto {
|
|
|
|
static_assert(kInvalidUid == ipc::kInvalidUid, "kInvalidUid mismatching");
|
|
|
|
const char* GetProducerSocket() {
|
|
static const char* name = getenv("PERFETTO_PRODUCER_SOCK_NAME");
|
|
if (name == nullptr) {
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
name = "/dev/socket/traced_producer";
|
|
#else
|
|
name = "/tmp/perfetto-producer";
|
|
#endif
|
|
}
|
|
return name;
|
|
}
|
|
|
|
const char* GetConsumerSocket() {
|
|
static const char* name = getenv("PERFETTO_CONSUMER_SOCK_NAME");
|
|
if (name == nullptr) {
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
name = "/dev/socket/traced_consumer";
|
|
#else
|
|
name = "/tmp/perfetto-consumer";
|
|
#endif
|
|
}
|
|
return name;
|
|
}
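
// Illustrative usage (the override path is hypothetical):
//
//   // Point the client library at a non-default traced instance, e.g.:
//   //   PERFETTO_PRODUCER_SOCK_NAME=/tmp/my_traced_producer ./my_producer
//   const char* producer_sock = GetProducerSocket();
//   const char* consumer_sock = GetConsumerSocket();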
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/memfd.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/memfd.h
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_MEMFD_H_
|
|
#define SRC_TRACING_IPC_MEMFD_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
// Some android build bots use a sysroot that doesn't support memfd when
|
|
// compiling for the host, so we define the flags we need ourselves.
|
|
|
|
// from memfd.h
|
|
#ifndef MFD_CLOEXEC
|
|
#define MFD_CLOEXEC 0x0001U
|
|
#define MFD_ALLOW_SEALING 0x0002U
|
|
#endif
|
|
|
|
// from fcntl.h
|
|
#ifndef F_ADD_SEALS
|
|
#define F_ADD_SEALS 1033
|
|
#define F_GET_SEALS 1034
|
|
#define F_SEAL_SEAL 0x0001
|
|
#define F_SEAL_SHRINK 0x0002
|
|
#define F_SEAL_GROW 0x0004
|
|
#define F_SEAL_WRITE 0x0008
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
|
|
// Whether the operating system supports memfd.
|
|
bool HasMemfdSupport();
|
|
|
|
// Calls memfd_create(2), if available on this platform, and returns the fd.
// Also performs a kernel version check for safety on older kernels
// (b/116769556). Returns an invalid ScopedFile on failure.
base::ScopedFile CreateMemfd(const char* name, unsigned int flags);
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_MEMFD_H_
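
// Illustrative sketch, not part of the generated sources: creating a sealed
// memfd region with the helper declared above. The function name, the
// 4096-byte size and the ftruncate()/fcntl() calls are hypothetical example
// choices; only CreateMemfd() and the MFD_*/F_SEAL_* constants come from this
// header.
//
//   #include <fcntl.h>
//   #include <unistd.h>
//   #include <utility>
//   // #include "src/tracing/ipc/memfd.h"
//
//   bool CreateSealedRegion(perfetto::base::ScopedFile* out_fd) {
//     perfetto::base::ScopedFile fd =
//         perfetto::CreateMemfd("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);
//     if (!fd)
//       return false;  // Kernel too old or memfd not supported.
//     if (ftruncate(fd.get(), 4096) != 0)
//       return false;
//     // Forbid resizing the region and adding further seals.
//     if (fcntl(fd.get(), F_ADD_SEALS,
//               F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL) != 0)
//       return false;
//     *out_fd = std::move(fd);
//     return true;
//   }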
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/memfd.h"
|
|
|
|
#include <errno.h>
|
|
|
|
#define PERFETTO_MEMFD_ENABLED() \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX)
|
|
|
|
#if PERFETTO_MEMFD_ENABLED()
|
|
|
|
#include <stdio.h>
|
|
#include <string.h>
|
|
#include <sys/syscall.h>
|
|
#include <sys/utsname.h>
|
|
#include <unistd.h>
|
|
|
|
// Some android build bots use a sysroot that doesn't support memfd when
|
|
// compiling for the host, so we redefine it if necessary.
|
|
#if !defined(__NR_memfd_create)
|
|
#if defined(__x86_64__)
|
|
#define __NR_memfd_create 319
|
|
#elif defined(__i386__)
|
|
#define __NR_memfd_create 356
|
|
#elif defined(__aarch64__)
|
|
#define __NR_memfd_create 279
|
|
#elif defined(__arm__)
|
|
#define __NR_memfd_create 385
|
|
#else
|
|
#error "unsupported sysroot without memfd support"
|
|
#endif
|
|
#endif // !defined(__NR_memfd_create)
|
|
|
|
namespace perfetto {
|
|
bool HasMemfdSupport() {
|
|
static bool kSupportsMemfd = [] {
|
|
    // Check that the kernel version supports memfd_create(). Some older
    // kernels segfault when executing memfd_create() rather than returning
    // ENOSYS (b/116769556).
|
|
static constexpr int kRequiredMajor = 3;
|
|
static constexpr int kRequiredMinor = 17;
|
|
struct utsname uts;
|
|
int major, minor;
|
|
if (uname(&uts) == 0 && strcmp(uts.sysname, "Linux") == 0 &&
|
|
sscanf(uts.release, "%d.%d", &major, &minor) == 2 &&
|
|
((major < kRequiredMajor ||
|
|
(major == kRequiredMajor && minor < kRequiredMinor)))) {
|
|
return false;
|
|
}
|
|
|
|
base::ScopedFile fd;
|
|
fd.reset(static_cast<int>(syscall(__NR_memfd_create, "perfetto_shmem",
|
|
MFD_CLOEXEC | MFD_ALLOW_SEALING)));
|
|
return !!fd;
|
|
}();
|
|
return kSupportsMemfd;
|
|
}
|
|
|
|
base::ScopedFile CreateMemfd(const char* name, unsigned int flags) {
|
|
if (!HasMemfdSupport()) {
|
|
errno = ENOSYS;
|
|
return base::ScopedFile();
|
|
}
|
|
return base::ScopedFile(
|
|
static_cast<int>(syscall(__NR_memfd_create, name, flags)));
|
|
}
|
|
} // namespace perfetto
|
|
|
|
#else // PERFETTO_MEMFD_ENABLED()
|
|
|
|
namespace perfetto {
|
|
bool HasMemfdSupport() {
|
|
return false;
|
|
}
|
|
base::ScopedFile CreateMemfd(const char*, unsigned int) {
|
|
errno = ENOSYS;
|
|
return base::ScopedFile();
|
|
}
|
|
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_MEMFD_ENABLED()
|
|
// gen_amalgamated begin source: src/tracing/ipc/posix_shared_memory.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/posix_shared_memory.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_POSIX_SHARED_MEMORY_H_
|
|
#define SRC_TRACING_IPC_POSIX_SHARED_MEMORY_H_
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Implements SharedMemory and its factory for the POSIX-based transport.
|
|
class PosixSharedMemory : public SharedMemory {
|
|
public:
|
|
class Factory : public SharedMemory::Factory {
|
|
public:
|
|
~Factory() override;
|
|
std::unique_ptr<SharedMemory> CreateSharedMemory(size_t) override;
|
|
};
|
|
|
|
// Create a brand new SHM region.
|
|
static std::unique_ptr<PosixSharedMemory> Create(size_t size);
|
|
|
|
// Mmaps a file descriptor to an existing SHM region. If
|
|
// |require_seals_if_supported| is true and the system supports
|
|
// memfd_create(), the FD is required to be a sealed memfd with F_SEAL_SEAL,
|
|
// F_SEAL_GROW, and F_SEAL_SHRINK seals set (otherwise, nullptr is returned).
|
|
// May also return nullptr if mapping fails for another reason (e.g. OOM).
|
|
static std::unique_ptr<PosixSharedMemory> AttachToFd(
|
|
base::ScopedFile,
|
|
bool require_seals_if_supported = true);
|
|
|
|
~PosixSharedMemory() override;
|
|
|
|
int fd() const override { return fd_.get(); }
|
|
|
|
// SharedMemory implementation.
|
|
void* start() const override { return start_; }
|
|
size_t size() const override { return size_; }
|
|
|
|
private:
|
|
static std::unique_ptr<PosixSharedMemory> MapFD(base::ScopedFile, size_t);
|
|
|
|
PosixSharedMemory(void* start, size_t size, base::ScopedFile);
|
|
PosixSharedMemory(const PosixSharedMemory&) = delete;
|
|
PosixSharedMemory& operator=(const PosixSharedMemory&) = delete;
|
|
|
|
void* const start_;
|
|
const size_t size_;
|
|
base::ScopedFile fd_;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_POSIX_SHARED_MEMORY_H_
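
// Illustrative sketch, not part of the generated sources: creating a shared
// memory region and re-attaching to a duplicate of its file descriptor via
// the class declared above. The 2 MiB size and the dup() handoff are
// hypothetical example choices.
//
//   #include <string.h>
//   #include <unistd.h>
//   #include <memory>
//   #include <utility>
//   // #include "src/tracing/ipc/posix_shared_memory.h"
//
//   void ShmExample() {
//     // Creates (and seals, when memfd is available) a brand new region.
//     std::unique_ptr<perfetto::PosixSharedMemory> shm =
//         perfetto::PosixSharedMemory::Create(2 * 1024 * 1024);
//
//     // Another component can map the same pages from a dup of the fd.
//     perfetto::base::ScopedFile dup_fd(dup(shm->fd()));
//     std::unique_ptr<perfetto::PosixSharedMemory> attached =
//         perfetto::PosixSharedMemory::AttachToFd(
//             std::move(dup_fd), /*require_seals_if_supported=*/true);
//     if (attached)
//       memset(attached->start(), 0, attached->size());
//   }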
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
|
|
|
|
#include <fcntl.h>
|
|
#include <stdint.h>
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <sys/mman.h>
|
|
#include <sys/stat.h>
|
|
#include <unistd.h>
|
|
|
|
#include <memory>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/temp_file.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/memfd.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
int kFileSeals = F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL;
|
|
} // namespace
|
|
|
|
// static
|
|
std::unique_ptr<PosixSharedMemory> PosixSharedMemory::Create(size_t size) {
|
|
base::ScopedFile fd =
|
|
CreateMemfd("perfetto_shmem", MFD_CLOEXEC | MFD_ALLOW_SEALING);
|
|
bool is_memfd = !!fd;
|
|
|
|
  // In-tree builds only allow memfd, so we can inspect the seals to verify
  // that the fd is appropriately sealed. We'll crash in the PERFETTO_CHECK(fd)
  // below if memfd_create failed.
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
|
|
if (!fd) {
|
|
// TODO: if this fails on Android we should fall back on ashmem.
|
|
PERFETTO_DPLOG("memfd_create() failed");
|
|
fd = base::TempFile::CreateUnlinked().ReleaseFD();
|
|
}
|
|
#endif
|
|
|
|
PERFETTO_CHECK(fd);
|
|
int res = ftruncate(fd.get(), static_cast<off_t>(size));
|
|
PERFETTO_CHECK(res == 0);
|
|
|
|
if (is_memfd) {
|
|
// When memfd is supported, file seals should be, too.
|
|
res = fcntl(*fd, F_ADD_SEALS, kFileSeals);
|
|
PERFETTO_DCHECK(res == 0);
|
|
}
|
|
|
|
return MapFD(std::move(fd), size);
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<PosixSharedMemory> PosixSharedMemory::AttachToFd(
|
|
base::ScopedFile fd,
|
|
bool require_seals_if_supported) {
|
|
bool requires_seals = require_seals_if_supported;
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
|
|
// In-tree kernels all support memfd.
|
|
PERFETTO_CHECK(HasMemfdSupport());
|
|
#else
|
|
// In out-of-tree builds, we only require seals if the kernel supports memfd.
|
|
if (requires_seals)
|
|
requires_seals = HasMemfdSupport();
|
|
#endif
|
|
|
|
if (requires_seals) {
|
|
// If the system supports memfd, we require a sealed memfd.
|
|
int res = fcntl(*fd, F_GET_SEALS);
|
|
if (res == -1 || (res & kFileSeals) != kFileSeals) {
|
|
PERFETTO_PLOG("Couldn't verify file seals on shmem FD");
|
|
return nullptr;
|
|
}
|
|
}
|
|
|
|
struct stat stat_buf = {};
|
|
int res = fstat(fd.get(), &stat_buf);
|
|
PERFETTO_CHECK(res == 0 && stat_buf.st_size > 0);
|
|
return MapFD(std::move(fd), static_cast<size_t>(stat_buf.st_size));
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<PosixSharedMemory> PosixSharedMemory::MapFD(base::ScopedFile fd,
|
|
size_t size) {
|
|
PERFETTO_DCHECK(fd);
|
|
PERFETTO_DCHECK(size > 0);
|
|
void* start =
|
|
mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0);
|
|
PERFETTO_CHECK(start != MAP_FAILED);
|
|
return std::unique_ptr<PosixSharedMemory>(
|
|
new PosixSharedMemory(start, size, std::move(fd)));
|
|
}
|
|
|
|
PosixSharedMemory::PosixSharedMemory(void* start,
|
|
size_t size,
|
|
base::ScopedFile fd)
|
|
: start_(start), size_(size), fd_(std::move(fd)) {}
|
|
|
|
PosixSharedMemory::~PosixSharedMemory() {
|
|
munmap(start(), size());
|
|
}
|
|
|
|
PosixSharedMemory::Factory::~Factory() {}
|
|
|
|
std::unique_ptr<SharedMemory> PosixSharedMemory::Factory::CreateSharedMemory(
|
|
size_t size) {
|
|
return PosixSharedMemory::Create(size);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/consumer/consumer_ipc_client_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/consumer/consumer_ipc_client_impl.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/ipc/consumer_ipc_client.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_IPC_CONSUMER_IPC_CLIENT_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_IPC_CONSUMER_IPC_CLIENT_H_
|
|
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class Consumer;
|
|
|
|
// Allows connecting to a remote Service through a UNIX domain socket.
|
|
// Exposed to:
|
|
// Consumer(s) of the tracing library.
|
|
// Implemented in:
|
|
// src/tracing/ipc/consumer/consumer_ipc_client_impl.cc
|
|
class PERFETTO_EXPORT ConsumerIPCClient {
|
|
public:
|
|
  // Connects to the consumer port of the Service listening on the given
|
|
// |service_sock_name|. If the connection is successful, the OnConnect()
|
|
// method will be invoked asynchronously on the passed Consumer interface.
|
|
// If the connection fails, OnDisconnect() will be invoked instead.
|
|
  // The returned ConsumerEndpoint also serves to delimit the scope of the
  // callbacks invoked on the Consumer interface: no Consumer callback is
  // invoked after its destruction and any pending callback will be dropped.
|
|
static std::unique_ptr<TracingService::ConsumerEndpoint>
|
|
Connect(const char* service_sock_name, Consumer*, base::TaskRunner*);
|
|
|
|
protected:
|
|
ConsumerIPCClient() = delete;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_IPC_CONSUMER_IPC_CLIENT_H_
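
// Illustrative sketch, not part of the generated sources: wiring a Consumer
// implementation to the service through the factory declared above.
// MyConsumer (any perfetto::Consumer subclass) and ConnectConsumer() are
// hypothetical names; Connect() and GetConsumerSocket() are the real APIs.
//
//   // #include "perfetto/ext/tracing/ipc/consumer_ipc_client.h"
//   // #include "perfetto/ext/tracing/ipc/default_socket.h"
//
//   std::unique_ptr<perfetto::TracingService::ConsumerEndpoint>
//   ConnectConsumer(MyConsumer* consumer,
//                   perfetto::base::TaskRunner* task_runner) {
//     // Keep the returned endpoint alive for as long as callbacks on
//     // |consumer| are expected: destroying it drops any pending callback.
//     // Consumer::OnConnect() / OnDisconnect() will be invoked
//     // asynchronously on |task_runner|.
//     return perfetto::ConsumerIPCClient::Connect(
//         perfetto::GetConsumerSocket(), consumer, task_runner);
//   }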
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_CONSUMER_CONSUMER_IPC_CLIENT_IMPL_H_
|
|
#define SRC_TRACING_IPC_CONSUMER_CONSUMER_IPC_CLIENT_IMPL_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <list>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/consumer_ipc_client.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.ipc.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
namespace ipc {
|
|
class Client;
|
|
} // namespace ipc
|
|
|
|
class Consumer;
|
|
|
|
// Exposes a Service endpoint to Consumer(s), proxying all requests through an
// IPC channel to the remote Service. This class is the glue layer between the
|
|
// generic Service interface exposed to the clients of the library and the
|
|
// actual IPC transport.
|
|
class ConsumerIPCClientImpl : public TracingService::ConsumerEndpoint,
|
|
public ipc::ServiceProxy::EventListener {
|
|
public:
|
|
ConsumerIPCClientImpl(const char* service_sock_name,
|
|
Consumer*,
|
|
base::TaskRunner*);
|
|
~ConsumerIPCClientImpl() override;
|
|
|
|
// TracingService::ConsumerEndpoint implementation.
|
|
// These methods are invoked by the actual Consumer(s) code by clients of the
|
|
// tracing library, which know nothing about the IPC transport.
|
|
void EnableTracing(const TraceConfig&, base::ScopedFile) override;
|
|
void StartTracing() override;
|
|
void ChangeTraceConfig(const TraceConfig&) override;
|
|
void DisableTracing() override;
|
|
void ReadBuffers() override;
|
|
void FreeBuffers() override;
|
|
void Flush(uint32_t timeout_ms, FlushCallback) override;
|
|
void Detach(const std::string& key) override;
|
|
void Attach(const std::string& key) override;
|
|
void GetTraceStats() override;
|
|
void ObserveEvents(uint32_t enabled_event_types) override;
|
|
void QueryServiceState(QueryServiceStateCallback) override;
|
|
void QueryCapabilities(QueryCapabilitiesCallback) override;
|
|
|
|
// ipc::ServiceProxy::EventListener implementation.
|
|
// These methods are invoked by the IPC layer, which knows nothing about
|
|
  // tracing, producers and consumers.
|
|
void OnConnect() override;
|
|
void OnDisconnect() override;
|
|
|
|
private:
|
|
struct PendingQueryServiceRequest {
|
|
QueryServiceStateCallback callback;
|
|
|
|
// All the replies will be appended here until |has_more| == false.
|
|
std::vector<uint8_t> merged_resp;
|
|
};
|
|
|
|
// List because we need stable iterators.
|
|
using PendingQueryServiceRequests = std::list<PendingQueryServiceRequest>;
|
|
|
|
void OnReadBuffersResponse(
|
|
ipc::AsyncResult<protos::gen::ReadBuffersResponse>);
|
|
void OnEnableTracingResponse(
|
|
ipc::AsyncResult<protos::gen::EnableTracingResponse>);
|
|
void OnQueryServiceStateResponse(
|
|
ipc::AsyncResult<protos::gen::QueryServiceStateResponse>,
|
|
PendingQueryServiceRequests::iterator);
|
|
|
|
  // TODO(primiano): think about dtor order; do we rely on any specific
  // sequence?
|
|
Consumer* const consumer_;
|
|
|
|
// The object that owns the client socket and takes care of IPC traffic.
|
|
std::unique_ptr<ipc::Client> ipc_channel_;
|
|
|
|
// The proxy interface for the consumer port of the service. It is bound
|
|
// to |ipc_channel_| and (de)serializes method invocations over the wire.
|
|
protos::gen::ConsumerPortProxy consumer_port_;
|
|
|
|
bool connected_ = false;
|
|
|
|
PendingQueryServiceRequests pending_query_svc_reqs_;
|
|
|
|
// When a packet is too big to fit into a ReadBuffersResponse IPC, the service
|
|
// will chunk it into several IPCs, each containing few slices of the packet
|
|
// (a packet's slice is always guaranteed to be << kIPCBufferSize). When
|
|
// chunking happens this field accumulates the slices received until the
|
|
// one with |last_slice_for_packet| == true is received.
|
|
TracePacket partial_packet_;
|
|
|
|
// Keep last.
|
|
base::WeakPtrFactory<ConsumerIPCClientImpl> weak_ptr_factory_;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_CONSUMER_CONSUMER_IPC_CLIENT_IMPL_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/consumer/consumer_ipc_client_impl.h"
|
|
|
|
#include <inttypes.h>
|
|
#include <string.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_state.h"
|
|
|
|
// TODO(fmayer): Add a test to check what happens when ConsumerIPCClientImpl
// gets destroyed w.r.t. the Consumer pointer. Also think about the lifetime of
// the Consumer* during the callbacks.
|
|
|
|
namespace perfetto {
|
|
|
|
// static. (Declared in include/perfetto/ext/tracing/ipc/consumer_ipc_client.h).
|
|
std::unique_ptr<TracingService::ConsumerEndpoint> ConsumerIPCClient::Connect(
|
|
const char* service_sock_name,
|
|
Consumer* consumer,
|
|
base::TaskRunner* task_runner) {
|
|
return std::unique_ptr<TracingService::ConsumerEndpoint>(
|
|
new ConsumerIPCClientImpl(service_sock_name, consumer, task_runner));
|
|
}
|
|
|
|
ConsumerIPCClientImpl::ConsumerIPCClientImpl(const char* service_sock_name,
|
|
Consumer* consumer,
|
|
base::TaskRunner* task_runner)
|
|
: consumer_(consumer),
|
|
ipc_channel_(ipc::Client::CreateInstance(service_sock_name,
|
|
/*retry=*/false,
|
|
task_runner)),
|
|
consumer_port_(this /* event_listener */),
|
|
weak_ptr_factory_(this) {
|
|
ipc_channel_->BindService(consumer_port_.GetWeakPtr());
|
|
}
|
|
|
|
ConsumerIPCClientImpl::~ConsumerIPCClientImpl() = default;
|
|
|
|
// Called by the IPC layer if the BindService() succeeds.
|
|
void ConsumerIPCClientImpl::OnConnect() {
|
|
connected_ = true;
|
|
consumer_->OnConnect();
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::OnDisconnect() {
|
|
PERFETTO_DLOG("Tracing service connection failure");
|
|
connected_ = false;
|
|
consumer_->OnDisconnect();
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::EnableTracing(const TraceConfig& trace_config,
|
|
base::ScopedFile fd) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot EnableTracing(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
protos::gen::EnableTracingRequest req;
|
|
*req.mutable_trace_config() = trace_config;
|
|
ipc::Deferred<protos::gen::EnableTracingResponse> async_response;
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
async_response.Bind(
|
|
[weak_this](
|
|
ipc::AsyncResult<protos::gen::EnableTracingResponse> response) {
|
|
if (weak_this)
|
|
weak_this->OnEnableTracingResponse(std::move(response));
|
|
});
|
|
|
|
// |fd| will be closed when this function returns, but it's fine because the
|
|
// IPC layer dup()'s it when sending the IPC.
|
|
consumer_port_.EnableTracing(req, std::move(async_response), *fd);
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::ChangeTraceConfig(const TraceConfig&) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot ChangeTraceConfig(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
ipc::Deferred<protos::gen::ChangeTraceConfigResponse> async_response;
|
|
async_response.Bind(
|
|
[](ipc::AsyncResult<protos::gen::ChangeTraceConfigResponse> response) {
|
|
if (!response)
|
|
PERFETTO_DLOG("ChangeTraceConfig() failed");
|
|
});
|
|
protos::gen::ChangeTraceConfigRequest req;
|
|
consumer_port_.ChangeTraceConfig(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::StartTracing() {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot StartTracing(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
ipc::Deferred<protos::gen::StartTracingResponse> async_response;
|
|
async_response.Bind(
|
|
[](ipc::AsyncResult<protos::gen::StartTracingResponse> response) {
|
|
if (!response)
|
|
PERFETTO_DLOG("StartTracing() failed");
|
|
});
|
|
protos::gen::StartTracingRequest req;
|
|
consumer_port_.StartTracing(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::DisableTracing() {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot DisableTracing(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
ipc::Deferred<protos::gen::DisableTracingResponse> async_response;
|
|
async_response.Bind(
|
|
[](ipc::AsyncResult<protos::gen::DisableTracingResponse> response) {
|
|
if (!response)
|
|
PERFETTO_DLOG("DisableTracing() failed");
|
|
});
|
|
consumer_port_.DisableTracing(protos::gen::DisableTracingRequest(),
|
|
std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::ReadBuffers() {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot ReadBuffers(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
ipc::Deferred<protos::gen::ReadBuffersResponse> async_response;
|
|
|
|
// The IPC layer guarantees that callbacks are destroyed after this object
|
|
// is destroyed (by virtue of destroying the |consumer_port_|). In turn the
|
|
// contract of this class expects the caller to not destroy the Consumer class
|
|
// before having destroyed this class. Hence binding |this| here is safe.
|
|
async_response.Bind(
|
|
[this](ipc::AsyncResult<protos::gen::ReadBuffersResponse> response) {
|
|
OnReadBuffersResponse(std::move(response));
|
|
});
|
|
consumer_port_.ReadBuffers(protos::gen::ReadBuffersRequest(),
|
|
std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::OnReadBuffersResponse(
|
|
ipc::AsyncResult<protos::gen::ReadBuffersResponse> response) {
|
|
if (!response) {
|
|
PERFETTO_DLOG("ReadBuffers() failed");
|
|
return;
|
|
}
|
|
std::vector<TracePacket> trace_packets;
|
|
for (auto& resp_slice : response->slices()) {
|
|
const std::string& slice_data = resp_slice.data();
|
|
Slice slice = Slice::Allocate(slice_data.size());
|
|
memcpy(slice.own_data(), slice_data.data(), slice.size);
|
|
partial_packet_.AddSlice(std::move(slice));
|
|
if (resp_slice.last_slice_for_packet())
|
|
trace_packets.emplace_back(std::move(partial_packet_));
|
|
}
|
|
if (!trace_packets.empty() || !response.has_more())
|
|
consumer_->OnTraceData(std::move(trace_packets), response.has_more());
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::OnEnableTracingResponse(
|
|
ipc::AsyncResult<protos::gen::EnableTracingResponse> response) {
|
|
if (!response || response->disabled())
|
|
consumer_->OnTracingDisabled();
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::FreeBuffers() {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot FreeBuffers(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
protos::gen::FreeBuffersRequest req;
|
|
ipc::Deferred<protos::gen::FreeBuffersResponse> async_response;
|
|
async_response.Bind(
|
|
[](ipc::AsyncResult<protos::gen::FreeBuffersResponse> response) {
|
|
if (!response)
|
|
PERFETTO_DLOG("FreeBuffers() failed");
|
|
});
|
|
consumer_port_.FreeBuffers(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::Flush(uint32_t timeout_ms, FlushCallback callback) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot Flush(), not connected to tracing service");
|
|
return callback(/*success=*/false);
|
|
}
|
|
|
|
protos::gen::FlushRequest req;
|
|
req.set_timeout_ms(static_cast<uint32_t>(timeout_ms));
|
|
ipc::Deferred<protos::gen::FlushResponse> async_response;
|
|
async_response.Bind(
|
|
[callback](ipc::AsyncResult<protos::gen::FlushResponse> response) {
|
|
callback(!!response);
|
|
});
|
|
consumer_port_.Flush(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::Detach(const std::string& key) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot Detach(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
protos::gen::DetachRequest req;
|
|
req.set_key(key);
|
|
ipc::Deferred<protos::gen::DetachResponse> async_response;
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
|
|
async_response.Bind(
|
|
[weak_this](ipc::AsyncResult<protos::gen::DetachResponse> response) {
|
|
if (weak_this)
|
|
weak_this->consumer_->OnDetach(!!response);
|
|
});
|
|
consumer_port_.Detach(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::Attach(const std::string& key) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot Attach(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
{
|
|
protos::gen::AttachRequest req;
|
|
req.set_key(key);
|
|
ipc::Deferred<protos::gen::AttachResponse> async_response;
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
|
|
async_response.Bind(
|
|
[weak_this](ipc::AsyncResult<protos::gen::AttachResponse> response) {
|
|
if (!weak_this)
|
|
return;
|
|
if (!response) {
|
|
weak_this->consumer_->OnAttach(/*success=*/false, TraceConfig());
|
|
return;
|
|
}
|
|
const TraceConfig& trace_config = response->trace_config();
|
|
|
|
          // If attached successfully, also attach to the end-of-trace
          // notification callback, via EnableTracing(attach_notification_only).
|
|
protos::gen::EnableTracingRequest enable_req;
|
|
enable_req.set_attach_notification_only(true);
|
|
ipc::Deferred<protos::gen::EnableTracingResponse> enable_resp;
|
|
enable_resp.Bind(
|
|
[weak_this](
|
|
ipc::AsyncResult<protos::gen::EnableTracingResponse> resp) {
|
|
if (weak_this)
|
|
weak_this->OnEnableTracingResponse(std::move(resp));
|
|
});
|
|
weak_this->consumer_port_.EnableTracing(enable_req,
|
|
std::move(enable_resp));
|
|
|
|
weak_this->consumer_->OnAttach(/*success=*/true, trace_config);
|
|
});
|
|
consumer_port_.Attach(req, std::move(async_response));
|
|
}
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::GetTraceStats() {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot GetTraceStats(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
protos::gen::GetTraceStatsRequest req;
|
|
ipc::Deferred<protos::gen::GetTraceStatsResponse> async_response;
|
|
|
|
// The IPC layer guarantees that callbacks are destroyed after this object
|
|
// is destroyed (by virtue of destroying the |consumer_port_|). In turn the
|
|
// contract of this class expects the caller to not destroy the Consumer class
|
|
// before having destroyed this class. Hence binding |this| here is safe.
|
|
async_response.Bind(
|
|
[this](ipc::AsyncResult<protos::gen::GetTraceStatsResponse> response) {
|
|
if (!response) {
|
|
consumer_->OnTraceStats(/*success=*/false, TraceStats());
|
|
return;
|
|
}
|
|
consumer_->OnTraceStats(/*success=*/true, response->trace_stats());
|
|
});
|
|
consumer_port_.GetTraceStats(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::ObserveEvents(uint32_t enabled_event_types) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot ObserveEvents(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
protos::gen::ObserveEventsRequest req;
|
|
for (uint32_t i = 0; i < 32; i++) {
|
|
const uint32_t event_id = 1u << i;
|
|
if (enabled_event_types & event_id)
|
|
req.add_events_to_observe(static_cast<ObservableEvents::Type>(event_id));
|
|
}
|
|
|
|
ipc::Deferred<protos::gen::ObserveEventsResponse> async_response;
|
|
// The IPC layer guarantees that callbacks are destroyed after this object
|
|
// is destroyed (by virtue of destroying the |consumer_port_|). In turn the
|
|
// contract of this class expects the caller to not destroy the Consumer class
|
|
// before having destroyed this class. Hence binding |this| here is safe.
|
|
async_response.Bind(
|
|
[this](ipc::AsyncResult<protos::gen::ObserveEventsResponse> response) {
|
|
// Skip empty response, which the service sends to close the stream.
|
|
if (!response.has_more()) {
|
|
PERFETTO_DCHECK(!response->events().instance_state_changes().size());
|
|
return;
|
|
}
|
|
consumer_->OnObservableEvents(response->events());
|
|
});
|
|
consumer_port_.ObserveEvents(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::QueryServiceState(
|
|
QueryServiceStateCallback callback) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot QueryServiceState(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
auto it = pending_query_svc_reqs_.insert(pending_query_svc_reqs_.end(),
|
|
{std::move(callback), {}});
|
|
protos::gen::QueryServiceStateRequest req;
|
|
ipc::Deferred<protos::gen::QueryServiceStateResponse> async_response;
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
async_response.Bind(
|
|
[weak_this,
|
|
it](ipc::AsyncResult<protos::gen::QueryServiceStateResponse> response) {
|
|
if (weak_this)
|
|
weak_this->OnQueryServiceStateResponse(std::move(response), it);
|
|
});
|
|
consumer_port_.QueryServiceState(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::OnQueryServiceStateResponse(
|
|
ipc::AsyncResult<protos::gen::QueryServiceStateResponse> response,
|
|
PendingQueryServiceRequests::iterator req_it) {
|
|
PERFETTO_DCHECK(req_it->callback);
|
|
|
|
if (!response) {
|
|
auto callback = std::move(req_it->callback);
|
|
pending_query_svc_reqs_.erase(req_it);
|
|
callback(false, TracingServiceState());
|
|
return;
|
|
}
|
|
|
|
  // The QueryServiceState response can be split into several chunks if the
|
|
// service has several data sources. The client is supposed to merge all the
|
|
// replies. The easiest way to achieve this is to re-serialize the partial
|
|
// response and then re-decode the merged result in one shot.
|
|
std::vector<uint8_t>& merged_resp = req_it->merged_resp;
|
|
std::vector<uint8_t> part = response->service_state().SerializeAsArray();
|
|
merged_resp.insert(merged_resp.end(), part.begin(), part.end());
|
|
|
|
if (response.has_more())
|
|
return;
|
|
|
|
// All replies have been received. Decode the merged result and reply to the
|
|
// callback.
|
|
protos::gen::TracingServiceState svc_state;
|
|
bool ok = svc_state.ParseFromArray(merged_resp.data(), merged_resp.size());
|
|
if (!ok)
|
|
PERFETTO_ELOG("Failed to decode merged QueryServiceStateResponse");
|
|
auto callback = std::move(req_it->callback);
|
|
pending_query_svc_reqs_.erase(req_it);
|
|
callback(ok, std::move(svc_state));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::QueryCapabilities(
|
|
QueryCapabilitiesCallback callback) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot QueryCapabilities(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
protos::gen::QueryCapabilitiesRequest req;
|
|
ipc::Deferred<protos::gen::QueryCapabilitiesResponse> async_response;
|
|
async_response.Bind(
|
|
[callback](
|
|
ipc::AsyncResult<protos::gen::QueryCapabilitiesResponse> response) {
|
|
if (!response) {
|
|
// If the IPC fails, we are talking to an older version of the service
|
|
// that didn't support QueryCapabilities at all. In this case return
|
|
// an empty capabilities message.
|
|
callback(TracingServiceCapabilities());
|
|
} else {
|
|
callback(response->capabilities());
|
|
}
|
|
});
|
|
consumer_port_.QueryCapabilities(req, std::move(async_response));
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/producer/producer_ipc_client_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/producer/producer_ipc_client_impl.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/ipc/producer_ipc_client.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_IPC_PRODUCER_IPC_CLIENT_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_IPC_PRODUCER_IPC_CLIENT_H_
|
|
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class Producer;
|
|
|
|
// Allows connecting to a remote Service through a UNIX domain socket.
|
|
// Exposed to:
|
|
// Producer(s) of the tracing library.
|
|
// Implemented in:
|
|
// src/tracing/ipc/producer/producer_ipc_client_impl.cc
|
|
class PERFETTO_EXPORT ProducerIPCClient {
|
|
public:
|
|
enum class ConnectionFlags {
|
|
// Fails immediately with OnConnect(false) if the service connection cannot
|
|
// be established.
|
|
kDefault = 0,
|
|
|
|
// Keeps retrying with exponential backoff indefinitely. The caller will
|
|
// never see an OnConnect(false).
|
|
kRetryIfUnreachable = 1,
|
|
};
|
|
|
|
// Connects to the producer port of the Service listening on the given
|
|
// |service_sock_name|. If the connection is successful, the OnConnect()
|
|
// method will be invoked asynchronously on the passed Producer interface. If
|
|
// the connection fails, OnDisconnect() will be invoked instead. The returned
|
|
  // ProducerEndpoint also serves to delimit the scope of the callbacks invoked
  // on the Producer interface: no Producer callback is invoked after its
  // destruction and any pending callback will be dropped.
|
|
// To provide a producer-allocated shared memory buffer, both |shm| and
|
|
// |shm_arbiter| should be set. |shm_arbiter| should be an unbound
|
|
// SharedMemoryArbiter instance. When |shm| and |shm_arbiter| are provided,
|
|
// the service will attempt to adopt the provided SMB. If this fails, the
|
|
// ProducerEndpoint will disconnect, but the SMB and arbiter will remain valid
|
|
// until the client is destroyed.
|
|
//
|
|
// TODO(eseckler): Support adoption failure more gracefully.
|
|
static std::unique_ptr<TracingService::ProducerEndpoint> Connect(
|
|
const char* service_sock_name,
|
|
Producer*,
|
|
const std::string& producer_name,
|
|
base::TaskRunner*,
|
|
TracingService::ProducerSMBScrapingMode smb_scraping_mode =
|
|
TracingService::ProducerSMBScrapingMode::kDefault,
|
|
size_t shared_memory_size_hint_bytes = 0,
|
|
size_t shared_memory_page_size_hint_bytes = 0,
|
|
std::unique_ptr<SharedMemory> shm = nullptr,
|
|
std::unique_ptr<SharedMemoryArbiter> shm_arbiter = nullptr,
|
|
ConnectionFlags = ConnectionFlags::kDefault);
|
|
|
|
protected:
|
|
ProducerIPCClient() = delete;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_IPC_PRODUCER_IPC_CLIENT_H_
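
// Illustrative sketch, not part of the generated sources: connecting a
// Producer with a service-allocated shared memory buffer, spelling out the
// default arguments of Connect() declared above. MyProducer (any
// perfetto::Producer subclass), ConnectProducer() and the
// "com.example.producer" name are hypothetical.
//
//   // #include "perfetto/ext/tracing/ipc/producer_ipc_client.h"
//   // #include "perfetto/ext/tracing/ipc/default_socket.h"
//
//   std::unique_ptr<perfetto::TracingService::ProducerEndpoint>
//   ConnectProducer(MyProducer* producer,
//                   perfetto::base::TaskRunner* task_runner) {
//     return perfetto::ProducerIPCClient::Connect(
//         perfetto::GetProducerSocket(), producer, "com.example.producer",
//         task_runner,
//         perfetto::TracingService::ProducerSMBScrapingMode::kDefault,
//         /*shared_memory_size_hint_bytes=*/0,
//         /*shared_memory_page_size_hint_bytes=*/0,
//         /*shm=*/nullptr, /*shm_arbiter=*/nullptr,
//         // Keep retrying if the service is not up yet.
//         perfetto::ProducerIPCClient::ConnectionFlags::kRetryIfUnreachable);
//   }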
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_PRODUCER_PRODUCER_IPC_CLIENT_IMPL_H_
|
|
#define SRC_TRACING_IPC_PRODUCER_PRODUCER_IPC_CLIENT_IMPL_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <set>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/producer_ipc_client.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.ipc.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
namespace ipc {
|
|
class Client;
|
|
} // namespace ipc
|
|
|
|
class Producer;
|
|
class SharedMemoryArbiter;
|
|
|
|
// Exposes a Service endpoint to Producer(s), proxying all requests through an
// IPC channel to the remote Service. This class is the glue layer between the
|
|
// generic Service interface exposed to the clients of the library and the
|
|
// actual IPC transport.
|
|
class ProducerIPCClientImpl : public TracingService::ProducerEndpoint,
|
|
public ipc::ServiceProxy::EventListener {
|
|
public:
|
|
ProducerIPCClientImpl(const char* service_sock_name,
|
|
Producer*,
|
|
const std::string& producer_name,
|
|
base::TaskRunner*,
|
|
TracingService::ProducerSMBScrapingMode,
|
|
size_t shared_memory_size_hint_bytes,
|
|
size_t shared_memory_page_size_hint_bytes,
|
|
std::unique_ptr<SharedMemory> shm,
|
|
std::unique_ptr<SharedMemoryArbiter> shm_arbiter,
|
|
ProducerIPCClient::ConnectionFlags);
|
|
~ProducerIPCClientImpl() override;
|
|
|
|
// TracingService::ProducerEndpoint implementation.
|
|
// These methods are invoked by the actual Producer(s) code by clients of the
|
|
// tracing library, which know nothing about the IPC transport.
|
|
void RegisterDataSource(const DataSourceDescriptor&) override;
|
|
void UnregisterDataSource(const std::string& name) override;
|
|
void RegisterTraceWriter(uint32_t writer_id, uint32_t target_buffer) override;
|
|
void UnregisterTraceWriter(uint32_t writer_id) override;
|
|
void CommitData(const CommitDataRequest&, CommitDataCallback) override;
|
|
void NotifyDataSourceStarted(DataSourceInstanceID) override;
|
|
void NotifyDataSourceStopped(DataSourceInstanceID) override;
|
|
void ActivateTriggers(const std::vector<std::string>&) override;
|
|
void Sync(std::function<void()> callback) override;
|
|
|
|
std::unique_ptr<TraceWriter> CreateTraceWriter(
|
|
BufferID target_buffer,
|
|
BufferExhaustedPolicy) override;
|
|
SharedMemoryArbiter* MaybeSharedMemoryArbiter() override;
|
|
bool IsShmemProvidedByProducer() const override;
|
|
void NotifyFlushComplete(FlushRequestID) override;
|
|
SharedMemory* shared_memory() const override;
|
|
size_t shared_buffer_page_size_kb() const override;
|
|
|
|
// ipc::ServiceProxy::EventListener implementation.
|
|
// These methods are invoked by the IPC layer, which knows nothing about
|
|
// tracing, producers and consumers.
|
|
void OnConnect() override;
|
|
void OnDisconnect() override;
|
|
|
|
private:
|
|
// Invoked soon after having established the connection with the service.
|
|
void OnConnectionInitialized(bool connection_succeeded,
|
|
bool using_shmem_provided_by_producer);
|
|
|
|
// Invoked when the remote Service sends an IPC to tell us to do something
|
|
// (e.g. start/stop a data source).
|
|
void OnServiceRequest(const protos::gen::GetAsyncCommandResponse&);
|
|
|
|
  // TODO: think about destruction order; do we rely on any specific dtor
  // sequence?
|
|
Producer* const producer_;
|
|
base::TaskRunner* const task_runner_;
|
|
|
|
// The object that owns the client socket and takes care of IPC traffic.
|
|
std::unique_ptr<ipc::Client> ipc_channel_;
|
|
|
|
// The proxy interface for the producer port of the service. It is bound
|
|
// to |ipc_channel_| and (de)serializes method invocations over the wire.
|
|
protos::gen::ProducerPortProxy producer_port_;
|
|
|
|
std::unique_ptr<SharedMemory> shared_memory_;
|
|
std::unique_ptr<SharedMemoryArbiter> shared_memory_arbiter_;
|
|
size_t shared_buffer_page_size_kb_ = 0;
|
|
std::set<DataSourceInstanceID> data_sources_setup_;
|
|
bool connected_ = false;
|
|
std::string const name_;
|
|
size_t shared_memory_page_size_hint_bytes_ = 0;
|
|
size_t shared_memory_size_hint_bytes_ = 0;
|
|
TracingService::ProducerSMBScrapingMode const smb_scraping_mode_;
|
|
bool is_shmem_provided_by_producer_ = false;
|
|
std::vector<std::function<void()>> pending_sync_reqs_;
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_PRODUCER_PRODUCER_IPC_CLIENT_IMPL_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/producer/producer_ipc_client_impl.h"
|
|
|
|
#include <inttypes.h>
|
|
#include <string.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
|
|
|
|
// TODO(fmayer): think about what happens when ProducerIPCClientImpl gets
// destroyed w.r.t. the Producer pointer. Also think about the lifetime of the
// Producer* during the callbacks.
|
|
|
|
namespace perfetto {
|
|
|
|
// static. (Declared in include/perfetto/ext/tracing/ipc/producer_ipc_client.h).
|
|
std::unique_ptr<TracingService::ProducerEndpoint> ProducerIPCClient::Connect(
|
|
const char* service_sock_name,
|
|
Producer* producer,
|
|
const std::string& producer_name,
|
|
base::TaskRunner* task_runner,
|
|
TracingService::ProducerSMBScrapingMode smb_scraping_mode,
|
|
size_t shared_memory_size_hint_bytes,
|
|
size_t shared_memory_page_size_hint_bytes,
|
|
std::unique_ptr<SharedMemory> shm,
|
|
std::unique_ptr<SharedMemoryArbiter> shm_arbiter,
|
|
ConnectionFlags conn_flags) {
|
|
return std::unique_ptr<TracingService::ProducerEndpoint>(
|
|
new ProducerIPCClientImpl(
|
|
service_sock_name, producer, producer_name, task_runner,
|
|
smb_scraping_mode, shared_memory_size_hint_bytes,
|
|
shared_memory_page_size_hint_bytes, std::move(shm),
|
|
std::move(shm_arbiter), conn_flags));
|
|
}
|
|
|
|
ProducerIPCClientImpl::ProducerIPCClientImpl(
|
|
const char* service_sock_name,
|
|
Producer* producer,
|
|
const std::string& producer_name,
|
|
base::TaskRunner* task_runner,
|
|
TracingService::ProducerSMBScrapingMode smb_scraping_mode,
|
|
size_t shared_memory_size_hint_bytes,
|
|
size_t shared_memory_page_size_hint_bytes,
|
|
std::unique_ptr<SharedMemory> shm,
|
|
std::unique_ptr<SharedMemoryArbiter> shm_arbiter,
|
|
ProducerIPCClient::ConnectionFlags conn_flags)
|
|
: producer_(producer),
|
|
task_runner_(task_runner),
|
|
ipc_channel_(ipc::Client::CreateInstance(
|
|
service_sock_name,
|
|
conn_flags == ProducerIPCClient::ConnectionFlags::kRetryIfUnreachable,
|
|
task_runner)),
|
|
producer_port_(this /* event_listener */),
|
|
shared_memory_(std::move(shm)),
|
|
shared_memory_arbiter_(std::move(shm_arbiter)),
|
|
name_(producer_name),
|
|
shared_memory_page_size_hint_bytes_(shared_memory_page_size_hint_bytes),
|
|
shared_memory_size_hint_bytes_(shared_memory_size_hint_bytes),
|
|
smb_scraping_mode_(smb_scraping_mode) {
|
|
// Check for producer-provided SMB (used by Chrome for startup tracing).
|
|
if (shared_memory_) {
|
|
// We also expect a valid (unbound) arbiter. Bind it to this endpoint now.
|
|
PERFETTO_CHECK(shared_memory_arbiter_);
|
|
shared_memory_arbiter_->BindToProducerEndpoint(this, task_runner_);
|
|
|
|
// If the service accepts our SMB, then it must match our requested page
|
|
// layout. The protocol doesn't allow the service to change the size and
|
|
// layout when the SMB is provided by the producer.
|
|
shared_buffer_page_size_kb_ = shared_memory_page_size_hint_bytes_ / 1024;
|
|
}
|
|
|
|
ipc_channel_->BindService(producer_port_.GetWeakPtr());
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
}
|
|
|
|
ProducerIPCClientImpl::~ProducerIPCClientImpl() = default;
|
|
|
|
// Called by the IPC layer if the BindService() succeeds.
|
|
void ProducerIPCClientImpl::OnConnect() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
connected_ = true;
|
|
|
|
// The IPC layer guarantees that any outstanding callback will be dropped on
|
|
// the floor if producer_port_ is destroyed between the request and the reply.
|
|
// Binding |this| is hence safe.
|
|
ipc::Deferred<protos::gen::InitializeConnectionResponse> on_init;
|
|
on_init.Bind(
|
|
[this](ipc::AsyncResult<protos::gen::InitializeConnectionResponse> resp) {
|
|
OnConnectionInitialized(
|
|
resp.success(),
|
|
resp.success() ? resp->using_shmem_provided_by_producer() : false);
|
|
});
|
|
protos::gen::InitializeConnectionRequest req;
|
|
req.set_producer_name(name_);
|
|
req.set_shared_memory_size_hint_bytes(
|
|
static_cast<uint32_t>(shared_memory_size_hint_bytes_));
|
|
req.set_shared_memory_page_size_hint_bytes(
|
|
static_cast<uint32_t>(shared_memory_page_size_hint_bytes_));
|
|
switch (smb_scraping_mode_) {
|
|
case TracingService::ProducerSMBScrapingMode::kDefault:
|
|
      // No need to set the mode; the service uses its own default when it is
      // unspecified.
|
|
break;
|
|
case TracingService::ProducerSMBScrapingMode::kEnabled:
|
|
req.set_smb_scraping_mode(
|
|
protos::gen::InitializeConnectionRequest::SMB_SCRAPING_ENABLED);
|
|
break;
|
|
case TracingService::ProducerSMBScrapingMode::kDisabled:
|
|
req.set_smb_scraping_mode(
|
|
protos::gen::InitializeConnectionRequest::SMB_SCRAPING_DISABLED);
|
|
break;
|
|
}
|
|
|
|
int shm_fd = -1;
|
|
if (shared_memory_) {
|
|
shm_fd = shared_memory_->fd();
|
|
req.set_producer_provided_shmem(true);
|
|
}
|
|
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
req.set_build_flags(
|
|
protos::gen::InitializeConnectionRequest::BUILD_FLAGS_DCHECKS_ON);
|
|
#else
|
|
req.set_build_flags(
|
|
protos::gen::InitializeConnectionRequest::BUILD_FLAGS_DCHECKS_OFF);
|
|
#endif
|
|
producer_port_.InitializeConnection(req, std::move(on_init), shm_fd);
|
|
|
|
// Create the back channel to receive commands from the Service.
|
|
ipc::Deferred<protos::gen::GetAsyncCommandResponse> on_cmd;
|
|
on_cmd.Bind(
|
|
[this](ipc::AsyncResult<protos::gen::GetAsyncCommandResponse> resp) {
|
|
if (!resp)
|
|
return; // The IPC channel was closed and |resp| was auto-rejected.
|
|
OnServiceRequest(*resp);
|
|
});
|
|
producer_port_.GetAsyncCommand(protos::gen::GetAsyncCommandRequest(),
|
|
std::move(on_cmd));
|
|
|
|
// If there are pending Sync() requests, send them now.
|
|
for (const auto& pending_sync : pending_sync_reqs_)
|
|
Sync(std::move(pending_sync));
|
|
pending_sync_reqs_.clear();
|
|
}
|
|
|
|
void ProducerIPCClientImpl::OnDisconnect() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Tracing service connection failure");
|
|
connected_ = false;
|
|
producer_->OnDisconnect();
|
|
data_sources_setup_.clear();
|
|
}
|
|
|
|
void ProducerIPCClientImpl::OnConnectionInitialized(
|
|
bool connection_succeeded,
|
|
bool using_shmem_provided_by_producer) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
// If connection_succeeded == false, the OnDisconnect() call will follow next
|
|
// and there we'll notify the |producer_|. TODO: add a test for this.
|
|
if (!connection_succeeded)
|
|
return;
|
|
is_shmem_provided_by_producer_ = using_shmem_provided_by_producer;
|
|
producer_->OnConnect();
|
|
|
|
// Bail out if the service failed to adopt our producer-allocated SMB.
|
|
// TODO(eseckler): Handle adoption failure more gracefully.
|
|
if (shared_memory_ && !is_shmem_provided_by_producer_) {
|
|
PERFETTO_DLOG("Service failed adopt producer-provided SMB, disconnecting.");
|
|
ipc_channel_.reset();
|
|
return;
|
|
}
|
|
}
|
|
|
|
void ProducerIPCClientImpl::OnServiceRequest(
|
|
const protos::gen::GetAsyncCommandResponse& cmd) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
|
|
// This message is sent only when connecting to a service running Android Q+.
|
|
// See comment below in kStartDataSource.
|
|
if (cmd.has_setup_data_source()) {
|
|
const auto& req = cmd.setup_data_source();
|
|
const DataSourceInstanceID dsid = req.new_instance_id();
|
|
data_sources_setup_.insert(dsid);
|
|
producer_->SetupDataSource(dsid, req.config());
|
|
return;
|
|
}
|
|
|
|
if (cmd.has_start_data_source()) {
|
|
const auto& req = cmd.start_data_source();
|
|
const DataSourceInstanceID dsid = req.new_instance_id();
|
|
const DataSourceConfig& cfg = req.config();
|
|
if (!data_sources_setup_.count(dsid)) {
|
|
// When connecting with an older (Android P) service, the service will not
|
|
// send a SetupDataSource message. We synthesize it here in that case.
|
|
producer_->SetupDataSource(dsid, cfg);
|
|
}
|
|
producer_->StartDataSource(dsid, cfg);
|
|
return;
|
|
}
|
|
|
|
if (cmd.has_stop_data_source()) {
|
|
const DataSourceInstanceID dsid = cmd.stop_data_source().instance_id();
|
|
producer_->StopDataSource(dsid);
|
|
data_sources_setup_.erase(dsid);
|
|
return;
|
|
}
|
|
|
|
if (cmd.has_setup_tracing()) {
|
|
base::ScopedFile shmem_fd = ipc_channel_->TakeReceivedFD();
|
|
if (shmem_fd) {
|
|
// This is the nominal case used in most configurations, where the service
|
|
// provides the SMB.
|
|
PERFETTO_CHECK(!is_shmem_provided_by_producer_ && !shared_memory_);
|
|
// TODO(primiano): handle mmap failure in case of OOM.
|
|
shared_memory_ =
|
|
PosixSharedMemory::AttachToFd(std::move(shmem_fd),
|
|
/*require_seals_if_supported=*/false);
|
|
shared_buffer_page_size_kb_ =
|
|
cmd.setup_tracing().shared_buffer_page_size_kb();
|
|
shared_memory_arbiter_ = SharedMemoryArbiter::CreateInstance(
|
|
shared_memory_.get(), shared_buffer_page_size_kb_ * 1024, this,
|
|
task_runner_);
|
|
} else {
|
|
// Producer-provided SMB (used by Chrome for startup tracing).
|
|
PERFETTO_CHECK(is_shmem_provided_by_producer_ && shared_memory_ &&
|
|
shared_memory_arbiter_);
|
|
}
|
|
producer_->OnTracingSetup();
|
|
return;
|
|
}
|
|
|
|
if (cmd.has_flush()) {
|
|
// This cast boilerplate is required only because protobuf uses its own
|
|
// uint64 and not stdint's uint64_t. On some 64 bit archs they differ on the
|
|
// type (long vs long long) even though they have the same size.
|
|
const auto* data_source_ids = cmd.flush().data_source_ids().data();
|
|
static_assert(sizeof(data_source_ids[0]) == sizeof(DataSourceInstanceID),
|
|
"data_source_ids should be 64-bit");
|
|
producer_->Flush(
|
|
cmd.flush().request_id(),
|
|
reinterpret_cast<const DataSourceInstanceID*>(data_source_ids),
|
|
static_cast<size_t>(cmd.flush().data_source_ids().size()));
|
|
return;
|
|
}
|
|
|
|
if (cmd.has_clear_incremental_state()) {
|
|
const auto* data_source_ids =
|
|
cmd.clear_incremental_state().data_source_ids().data();
|
|
static_assert(sizeof(data_source_ids[0]) == sizeof(DataSourceInstanceID),
|
|
"data_source_ids should be 64-bit");
|
|
producer_->ClearIncrementalState(
|
|
reinterpret_cast<const DataSourceInstanceID*>(data_source_ids),
|
|
static_cast<size_t>(
|
|
cmd.clear_incremental_state().data_source_ids().size()));
|
|
return;
|
|
}
|
|
|
|
PERFETTO_DFATAL("Unknown async request received from tracing service");
|
|
}
|
|
|
|
void ProducerIPCClientImpl::RegisterDataSource(
|
|
const DataSourceDescriptor& descriptor) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot RegisterDataSource(), not connected to tracing service");
|
|
}
|
|
protos::gen::RegisterDataSourceRequest req;
|
|
*req.mutable_data_source_descriptor() = descriptor;
|
|
ipc::Deferred<protos::gen::RegisterDataSourceResponse> async_response;
|
|
async_response.Bind(
|
|
[](ipc::AsyncResult<protos::gen::RegisterDataSourceResponse> response) {
|
|
if (!response)
|
|
PERFETTO_DLOG("RegisterDataSource() failed: connection reset");
|
|
});
|
|
producer_port_.RegisterDataSource(req, std::move(async_response));
|
|
}
|
|
|
|
void ProducerIPCClientImpl::UnregisterDataSource(const std::string& name) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot UnregisterDataSource(), not connected to tracing service");
|
|
return;
|
|
}
|
|
protos::gen::UnregisterDataSourceRequest req;
|
|
req.set_data_source_name(name);
|
|
producer_port_.UnregisterDataSource(
|
|
req, ipc::Deferred<protos::gen::UnregisterDataSourceResponse>());
|
|
}
|
|
|
|
void ProducerIPCClientImpl::RegisterTraceWriter(uint32_t writer_id,
|
|
uint32_t target_buffer) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot RegisterTraceWriter(), not connected to tracing service");
|
|
return;
|
|
}
|
|
protos::gen::RegisterTraceWriterRequest req;
|
|
req.set_trace_writer_id(writer_id);
|
|
req.set_target_buffer(target_buffer);
|
|
producer_port_.RegisterTraceWriter(
|
|
req, ipc::Deferred<protos::gen::RegisterTraceWriterResponse>());
|
|
}
|
|
|
|
void ProducerIPCClientImpl::UnregisterTraceWriter(uint32_t writer_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot UnregisterTraceWriter(), not connected to tracing service");
|
|
return;
|
|
}
|
|
protos::gen::UnregisterTraceWriterRequest req;
|
|
req.set_trace_writer_id(writer_id);
|
|
producer_port_.UnregisterTraceWriter(
|
|
req, ipc::Deferred<protos::gen::UnregisterTraceWriterResponse>());
|
|
}
|
|
|
|
void ProducerIPCClientImpl::CommitData(const CommitDataRequest& req,
|
|
CommitDataCallback callback) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot CommitData(), not connected to tracing service");
|
|
return;
|
|
}
|
|
ipc::Deferred<protos::gen::CommitDataResponse> async_response;
|
|
// TODO(primiano): add a test that destroys ProducerIPCClientImpl soon after
|
|
// this call and checks that the callback is dropped.
|
|
if (callback) {
|
|
async_response.Bind(
|
|
[callback](ipc::AsyncResult<protos::gen::CommitDataResponse> response) {
|
|
if (!response) {
|
|
PERFETTO_DLOG("CommitData() failed: connection reset");
|
|
return;
|
|
}
|
|
callback();
|
|
});
|
|
}
|
|
producer_port_.CommitData(req, std::move(async_response));
|
|
}
|
|
|
|
void ProducerIPCClientImpl::NotifyDataSourceStarted(DataSourceInstanceID id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot NotifyDataSourceStarted(), not connected to tracing service");
|
|
return;
|
|
}
|
|
protos::gen::NotifyDataSourceStartedRequest req;
|
|
req.set_data_source_id(id);
|
|
producer_port_.NotifyDataSourceStarted(
|
|
req, ipc::Deferred<protos::gen::NotifyDataSourceStartedResponse>());
|
|
}
|
|
|
|
void ProducerIPCClientImpl::NotifyDataSourceStopped(DataSourceInstanceID id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot NotifyDataSourceStopped(), not connected to tracing service");
|
|
return;
|
|
}
|
|
protos::gen::NotifyDataSourceStoppedRequest req;
|
|
req.set_data_source_id(id);
|
|
producer_port_.NotifyDataSourceStopped(
|
|
req, ipc::Deferred<protos::gen::NotifyDataSourceStoppedResponse>());
|
|
}
|
|
|
|
void ProducerIPCClientImpl::ActivateTriggers(
|
|
const std::vector<std::string>& triggers) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot ActivateTriggers(), not connected to tracing service");
|
|
return;
|
|
}
|
|
protos::gen::ActivateTriggersRequest proto_req;
|
|
for (const auto& name : triggers) {
|
|
*proto_req.add_trigger_names() = name;
|
|
}
|
|
producer_port_.ActivateTriggers(
|
|
proto_req, ipc::Deferred<protos::gen::ActivateTriggersResponse>());
|
|
}
|
|
|
|
void ProducerIPCClientImpl::Sync(std::function<void()> callback) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
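// Not connected yet: park the callback in |pending_sync_reqs_| so it can be
// issued once the connection to the service is established.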
|
|
pending_sync_reqs_.emplace_back(std::move(callback));
|
|
return;
|
|
}
|
|
ipc::Deferred<protos::gen::SyncResponse> resp;
|
|
resp.Bind([callback](ipc::AsyncResult<protos::gen::SyncResponse>) {
|
|
// Here we ACK the callback even if the service replies with a failure
|
|
// (i.e. the service is too old and doesn't understand Sync()). In that
|
|
// case the service has still seen the request, the IPC roundtrip is
|
|
// still a (weaker) linearization fence.
|
|
callback();
|
|
});
|
|
producer_port_.Sync(protos::gen::SyncRequest(), std::move(resp));
|
|
}
|
|
|
|
std::unique_ptr<TraceWriter> ProducerIPCClientImpl::CreateTraceWriter(
|
|
BufferID target_buffer,
|
|
BufferExhaustedPolicy buffer_exhausted_policy) {
|
|
// This method can be called by different threads. |shared_memory_arbiter_| is
// thread-safe, but be careful when accessing any other state in this function.
|
|
return shared_memory_arbiter_->CreateTraceWriter(target_buffer,
|
|
buffer_exhausted_policy);
|
|
}
|
|
|
|
SharedMemoryArbiter* ProducerIPCClientImpl::MaybeSharedMemoryArbiter() {
|
|
return shared_memory_arbiter_.get();
|
|
}
|
|
|
|
bool ProducerIPCClientImpl::IsShmemProvidedByProducer() const {
|
|
return is_shmem_provided_by_producer_;
|
|
}
|
|
|
|
void ProducerIPCClientImpl::NotifyFlushComplete(FlushRequestID req_id) {
|
|
return shared_memory_arbiter_->NotifyFlushComplete(req_id);
|
|
}
|
|
|
|
SharedMemory* ProducerIPCClientImpl::shared_memory() const {
|
|
return shared_memory_.get();
|
|
}
|
|
|
|
size_t ProducerIPCClientImpl::shared_buffer_page_size_kb() const {
|
|
return shared_buffer_page_size_kb_;
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/ipc/host_impl.cc
|
|
// gen_amalgamated begin header: src/ipc/host_impl.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_IPC_HOST_IMPL_H_
|
|
#define SRC_IPC_HOST_IMPL_H_
|
|
|
|
#include <map>
|
|
#include <set>
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
|
|
// gen_amalgamated expanded: #include "src/ipc/buffered_frame_deserializer.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
class HostImpl : public Host, public base::UnixSocket::EventListener {
|
|
public:
|
|
HostImpl(const char* socket_name, base::TaskRunner*);
|
|
HostImpl(base::ScopedFile socket_fd, base::TaskRunner*);
|
|
~HostImpl() override;
|
|
|
|
// Host implementation.
|
|
bool ExposeService(std::unique_ptr<Service>) override;
|
|
|
|
// base::UnixSocket::EventListener implementation.
|
|
void OnNewIncomingConnection(base::UnixSocket*,
|
|
std::unique_ptr<base::UnixSocket>) override;
|
|
void OnDisconnect(base::UnixSocket*) override;
|
|
void OnDataAvailable(base::UnixSocket*) override;
|
|
|
|
const base::UnixSocket* sock() const { return sock_.get(); }
|
|
|
|
private:
|
|
// Owns the per-client receive buffer (BufferedFrameDeserializer).
|
|
struct ClientConnection {
|
|
~ClientConnection();
|
|
ClientID id;
|
|
std::unique_ptr<base::UnixSocket> sock;
|
|
BufferedFrameDeserializer frame_deserializer;
|
|
base::ScopedFile received_fd;
|
|
};
|
|
struct ExposedService {
|
|
ExposedService(ServiceID, const std::string&, std::unique_ptr<Service>);
|
|
~ExposedService();
|
|
ExposedService(ExposedService&&) noexcept;
|
|
ExposedService& operator=(ExposedService&&);
|
|
|
|
ServiceID id;
|
|
std::string name;
|
|
std::unique_ptr<Service> instance;
|
|
};
|
|
|
|
HostImpl(const HostImpl&) = delete;
|
|
HostImpl& operator=(const HostImpl&) = delete;
|
|
|
|
bool Initialize(const char* socket_name);
|
|
void OnReceivedFrame(ClientConnection*, const Frame&);
|
|
void OnBindService(ClientConnection*, const Frame&);
|
|
void OnInvokeMethod(ClientConnection*, const Frame&);
|
|
void ReplyToMethodInvocation(ClientID, RequestID, AsyncResult<ProtoMessage>);
|
|
const ExposedService* GetServiceByName(const std::string&);
|
|
|
|
static void SendFrame(ClientConnection*, const Frame&, int fd = -1);
|
|
|
|
base::TaskRunner* const task_runner_;
|
|
std::map<ServiceID, ExposedService> services_;
|
|
std::unique_ptr<base::UnixSocket> sock_; // The listening socket.
|
|
std::map<ClientID, std::unique_ptr<ClientConnection>> clients_;
|
|
std::map<base::UnixSocket*, ClientConnection*> clients_by_socket_;
|
|
ServiceID last_service_id_ = 0;
|
|
ClientID last_client_id_ = 0;
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
base::WeakPtrFactory<HostImpl> weak_ptr_factory_; // Keep last.
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_IPC_HOST_IMPL_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/ipc/host_impl.h"
|
|
|
|
#include <inttypes.h>
|
|
|
|
#include <algorithm>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
|
|
|
|
// TODO(primiano): put limits on #connections/uid and req. queue (b/69093705).
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// static
|
|
std::unique_ptr<Host> Host::CreateInstance(const char* socket_name,
|
|
base::TaskRunner* task_runner) {
|
|
std::unique_ptr<HostImpl> host(new HostImpl(socket_name, task_runner));
|
|
if (!host->sock() || !host->sock()->is_listening())
|
|
return nullptr;
|
|
return std::unique_ptr<Host>(std::move(host));
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<Host> Host::CreateInstance(base::ScopedFile socket_fd,
|
|
base::TaskRunner* task_runner) {
|
|
std::unique_ptr<HostImpl> host(
|
|
new HostImpl(std::move(socket_fd), task_runner));
|
|
if (!host->sock() || !host->sock()->is_listening())
|
|
return nullptr;
|
|
return std::unique_ptr<Host>(std::move(host));
|
|
}
|
|
|
|
HostImpl::HostImpl(base::ScopedFile socket_fd, base::TaskRunner* task_runner)
|
|
: task_runner_(task_runner), weak_ptr_factory_(this) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
sock_ = base::UnixSocket::Listen(std::move(socket_fd), this, task_runner_,
|
|
base::SockFamily::kUnix,
|
|
base::SockType::kStream);
|
|
}
|
|
|
|
HostImpl::HostImpl(const char* socket_name, base::TaskRunner* task_runner)
|
|
: task_runner_(task_runner), weak_ptr_factory_(this) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
sock_ = base::UnixSocket::Listen(socket_name, this, task_runner_,
|
|
base::SockFamily::kUnix,
|
|
base::SockType::kStream);
|
|
}
|
|
|
|
HostImpl::~HostImpl() = default;
|
|
|
|
bool HostImpl::ExposeService(std::unique_ptr<Service> service) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
const std::string& service_name = service->GetDescriptor().service_name;
|
|
if (GetServiceByName(service_name)) {
|
|
PERFETTO_DLOG("Duplicate ExposeService(): %s", service_name.c_str());
|
|
return false;
|
|
}
|
|
ServiceID sid = ++last_service_id_;
|
|
ExposedService exposed_service(sid, service_name, std::move(service));
|
|
services_.emplace(sid, std::move(exposed_service));
|
|
return true;
|
|
}
|
|
|
|
void HostImpl::OnNewIncomingConnection(
|
|
base::UnixSocket*,
|
|
std::unique_ptr<base::UnixSocket> new_conn) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
std::unique_ptr<ClientConnection> client(new ClientConnection());
|
|
ClientID client_id = ++last_client_id_;
|
|
clients_by_socket_[new_conn.get()] = client.get();
|
|
client->id = client_id;
|
|
client->sock = std::move(new_conn);
|
|
clients_[client_id] = std::move(client);
|
|
}
|
|
|
|
void HostImpl::OnDataAvailable(base::UnixSocket* sock) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto it = clients_by_socket_.find(sock);
|
|
if (it == clients_by_socket_.end())
|
|
return;
|
|
ClientConnection* client = it->second;
|
|
BufferedFrameDeserializer& frame_deserializer = client->frame_deserializer;
|
|
|
|
size_t rsize;
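// Drain the socket: each Receive() fills a buffer handed out by the frame
// deserializer; complete frames are popped and dispatched in the loop below.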
|
|
do {
|
|
auto buf = frame_deserializer.BeginReceive();
|
|
base::ScopedFile fd;
|
|
rsize = client->sock->Receive(buf.data, buf.size, &fd);
|
|
if (fd) {
|
|
PERFETTO_DCHECK(!client->received_fd);
|
|
client->received_fd = std::move(fd);
|
|
}
|
|
if (!frame_deserializer.EndReceive(rsize))
|
|
return OnDisconnect(client->sock.get());
|
|
} while (rsize > 0);
|
|
|
|
for (;;) {
|
|
std::unique_ptr<Frame> frame = frame_deserializer.PopNextFrame();
|
|
if (!frame)
|
|
break;
|
|
OnReceivedFrame(client, *frame);
|
|
}
|
|
}
|
|
|
|
void HostImpl::OnReceivedFrame(ClientConnection* client,
|
|
const Frame& req_frame) {
|
|
if (req_frame.has_msg_bind_service())
|
|
return OnBindService(client, req_frame);
|
|
if (req_frame.has_msg_invoke_method())
|
|
return OnInvokeMethod(client, req_frame);
|
|
|
|
PERFETTO_DLOG("Received invalid RPC frame from client %" PRIu64, client->id);
|
|
Frame reply_frame;
|
|
reply_frame.set_request_id(req_frame.request_id());
|
|
reply_frame.mutable_msg_request_error()->set_error("unknown request");
|
|
SendFrame(client, reply_frame);
|
|
}
|
|
|
|
void HostImpl::OnBindService(ClientConnection* client, const Frame& req_frame) {
|
|
// Binding a service doesn't do anything major. It just returns the service id
// and its method map.
|
|
const Frame::BindService& req = req_frame.msg_bind_service();
|
|
Frame reply_frame;
|
|
reply_frame.set_request_id(req_frame.request_id());
|
|
auto* reply = reply_frame.mutable_msg_bind_service_reply();
|
|
const ExposedService* service = GetServiceByName(req.service_name());
|
|
if (service) {
|
|
reply->set_success(true);
|
|
reply->set_service_id(service->id);
|
|
uint32_t method_id = 1; // method ids start at index 1.
|
|
for (const auto& desc_method : service->instance->GetDescriptor().methods) {
|
|
Frame::BindServiceReply::MethodInfo* method_info = reply->add_methods();
|
|
method_info->set_name(desc_method.name);
|
|
method_info->set_id(method_id++);
|
|
}
|
|
}
|
|
SendFrame(client, reply_frame);
|
|
}
|
|
|
|
void HostImpl::OnInvokeMethod(ClientConnection* client,
|
|
const Frame& req_frame) {
|
|
const Frame::InvokeMethod& req = req_frame.msg_invoke_method();
|
|
Frame reply_frame;
|
|
RequestID request_id = req_frame.request_id();
|
|
reply_frame.set_request_id(request_id);
|
|
reply_frame.mutable_msg_invoke_method_reply()->set_success(false);
|
|
auto svc_it = services_.find(req.service_id());
|
|
if (svc_it == services_.end())
|
|
return SendFrame(client, reply_frame); // |success| == false by default.
|
|
|
|
Service* service = svc_it->second.instance.get();
|
|
const ServiceDescriptor& svc = service->GetDescriptor();
|
|
const auto& methods = svc.methods;
|
|
const uint32_t method_id = req.method_id();
|
|
if (method_id == 0 || method_id > methods.size())
|
|
return SendFrame(client, reply_frame);
|
|
|
|
const ServiceDescriptor::Method& method = methods[method_id - 1];
|
|
std::unique_ptr<ProtoMessage> decoded_req_args(
|
|
method.request_proto_decoder(req.args_proto()));
|
|
if (!decoded_req_args)
|
|
return SendFrame(client, reply_frame);
|
|
|
|
Deferred<ProtoMessage> deferred_reply;
|
|
base::WeakPtr<HostImpl> host_weak_ptr = weak_ptr_factory_.GetWeakPtr();
|
|
ClientID client_id = client->id;
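// Bind a reply only if the client actually wants one. The weak pointer guards
// against the case where the service replies asynchronously after this
// HostImpl has been destroyed.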
|
|
|
|
if (!req.drop_reply()) {
|
|
deferred_reply.Bind([host_weak_ptr, client_id,
|
|
request_id](AsyncResult<ProtoMessage> reply) {
|
|
if (!host_weak_ptr)
|
|
return; // The reply came too late, the HostImpl has gone.
|
|
host_weak_ptr->ReplyToMethodInvocation(client_id, request_id,
|
|
std::move(reply));
|
|
});
|
|
}
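// Expose the per-client context (identity and any FD received with this
// frame) on the Service object only for the duration of the synchronous
// dispatch below; it is cleared right after the invoker returns.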
|
|
|
|
service->client_info_ = ClientInfo(client->id, client->sock->peer_uid());
|
|
service->received_fd_ = &client->received_fd;
|
|
method.invoker(service, *decoded_req_args, std::move(deferred_reply));
|
|
service->received_fd_ = nullptr;
|
|
service->client_info_ = ClientInfo();
|
|
}
|
|
|
|
void HostImpl::ReplyToMethodInvocation(ClientID client_id,
|
|
RequestID request_id,
|
|
AsyncResult<ProtoMessage> reply) {
|
|
auto client_iter = clients_.find(client_id);
|
|
if (client_iter == clients_.end())
|
|
return; // client has disconnected by the time we got the async reply.
|
|
|
|
ClientConnection* client = client_iter->second.get();
|
|
Frame reply_frame;
|
|
reply_frame.set_request_id(request_id);
|
|
|
|
// TODO(fmayer): add a test to guarantee that the reply is consumed within the
|
|
// same call stack and not kept around. ConsumerIPCService::OnTraceData()
|
|
// relies on this behavior.
|
|
auto* reply_frame_data = reply_frame.mutable_msg_invoke_method_reply();
|
|
reply_frame_data->set_has_more(reply.has_more());
|
|
if (reply.success()) {
|
|
std::string reply_proto = reply->SerializeAsString();
|
|
reply_frame_data->set_reply_proto(reply_proto);
|
|
reply_frame_data->set_success(true);
|
|
}
|
|
SendFrame(client, reply_frame, reply.fd());
|
|
}
|
|
|
|
// static
|
|
void HostImpl::SendFrame(ClientConnection* client, const Frame& frame, int fd) {
|
|
std::string buf = BufferedFrameDeserializer::Serialize(frame);
|
|
|
|
// TODO(primiano): this should do non-blocking I/O. But then what if the
// socket buffer is full? We might want to either drop the request or throttle
// the send and PostTask the reply later. Right now we are making Send()
// blocking as a workaround. Propagate backpressure to the caller instead.
|
|
bool res = client->sock->Send(buf.data(), buf.size(), fd);
|
|
PERFETTO_CHECK(res || !client->sock->is_connected());
|
|
}
|
|
|
|
void HostImpl::OnDisconnect(base::UnixSocket* sock) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto it = clients_by_socket_.find(sock);
|
|
if (it == clients_by_socket_.end())
|
|
return;
|
|
ClientID client_id = it->second->id;
|
|
ClientInfo client_info(client_id, sock->peer_uid());
|
|
clients_by_socket_.erase(it);
|
|
PERFETTO_DCHECK(clients_.count(client_id));
|
|
clients_.erase(client_id);
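// Let every exposed service know, temporarily setting |client_info_| so that
// OnClientDisconnected() can tell which client went away.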
|
|
|
|
for (const auto& service_it : services_) {
|
|
Service& service = *service_it.second.instance;
|
|
service.client_info_ = client_info;
|
|
service.OnClientDisconnected();
|
|
service.client_info_ = ClientInfo();
|
|
}
|
|
}
|
|
|
|
const HostImpl::ExposedService* HostImpl::GetServiceByName(
|
|
const std::string& name) {
|
|
// This could be optimized by using another map<name,ServiceID>. However this
// is used only by Bind/ExposeService, which are quite rare (once per client
// connection and once per service instance), so it's not worth it.
|
|
for (const auto& it : services_) {
|
|
if (it.second.name == name)
|
|
return &it.second;
|
|
}
|
|
return nullptr;
|
|
}
|
|
|
|
HostImpl::ExposedService::ExposedService(ServiceID id_,
|
|
const std::string& name_,
|
|
std::unique_ptr<Service> instance_)
|
|
: id(id_), name(name_), instance(std::move(instance_)) {}
|
|
|
|
HostImpl::ExposedService::ExposedService(ExposedService&&) noexcept = default;
|
|
HostImpl::ExposedService& HostImpl::ExposedService::operator=(
|
|
HostImpl::ExposedService&&) = default;
|
|
HostImpl::ExposedService::~ExposedService() = default;
|
|
|
|
HostImpl::ClientConnection::~ClientConnection() = default;
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/service/consumer_ipc_service.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/service/consumer_ipc_service.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_SERVICE_CONSUMER_IPC_SERVICE_H_
|
|
#define SRC_TRACING_IPC_SERVICE_CONSUMER_IPC_SERVICE_H_
|
|
|
|
#include <list>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.ipc.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace ipc {
|
|
class Host;
|
|
} // namespace ipc
|
|
|
|
// Implements the Consumer port of the IPC service. This class proxies requests
// and responses between the core service logic (|svc_|) and remote Consumer(s)
// on the IPC socket, through the methods overridden from ConsumerPort.
|
|
class ConsumerIPCService : public protos::gen::ConsumerPort {
|
|
public:
|
|
explicit ConsumerIPCService(TracingService* core_service);
|
|
~ConsumerIPCService() override;
|
|
|
|
// ConsumerPort implementation (from .proto IPC definition).
|
|
void EnableTracing(const protos::gen::EnableTracingRequest&,
|
|
DeferredEnableTracingResponse) override;
|
|
void StartTracing(const protos::gen::StartTracingRequest&,
|
|
DeferredStartTracingResponse) override;
|
|
void ChangeTraceConfig(const protos::gen::ChangeTraceConfigRequest&,
|
|
DeferredChangeTraceConfigResponse) override;
|
|
void DisableTracing(const protos::gen::DisableTracingRequest&,
|
|
DeferredDisableTracingResponse) override;
|
|
void ReadBuffers(const protos::gen::ReadBuffersRequest&,
|
|
DeferredReadBuffersResponse) override;
|
|
void FreeBuffers(const protos::gen::FreeBuffersRequest&,
|
|
DeferredFreeBuffersResponse) override;
|
|
void Flush(const protos::gen::FlushRequest&, DeferredFlushResponse) override;
|
|
void Detach(const protos::gen::DetachRequest&,
|
|
DeferredDetachResponse) override;
|
|
void Attach(const protos::gen::AttachRequest&,
|
|
DeferredAttachResponse) override;
|
|
void GetTraceStats(const protos::gen::GetTraceStatsRequest&,
|
|
DeferredGetTraceStatsResponse) override;
|
|
void ObserveEvents(const protos::gen::ObserveEventsRequest&,
|
|
DeferredObserveEventsResponse) override;
|
|
void QueryServiceState(const protos::gen::QueryServiceStateRequest&,
|
|
DeferredQueryServiceStateResponse) override;
|
|
void QueryCapabilities(const protos::gen::QueryCapabilitiesRequest&,
|
|
DeferredQueryCapabilitiesResponse) override;
|
|
void OnClientDisconnected() override;
|
|
|
|
private:
|
|
// Acts like a Consumer with the core Service business logic (which doesn't
// know anything about the remote transport); all it does is proxy methods
// to the remote Consumer on the other side of the IPC channel.
|
|
class RemoteConsumer : public Consumer {
|
|
public:
|
|
RemoteConsumer();
|
|
~RemoteConsumer() override;
|
|
|
|
// These methods are called by the |core_service_| business logic. There is
// no connection here; these methods are posted straight away.
|
|
void OnConnect() override;
|
|
void OnDisconnect() override;
|
|
void OnTracingDisabled() override;
|
|
void OnTraceData(std::vector<TracePacket>, bool has_more) override;
|
|
void OnDetach(bool) override;
|
|
void OnAttach(bool, const TraceConfig&) override;
|
|
void OnTraceStats(bool, const TraceStats&) override;
|
|
void OnObservableEvents(const ObservableEvents&) override;
|
|
|
|
void CloseObserveEventsResponseStream();
|
|
|
|
// The interface obtained from the core service business logic through
// TracingService::ConnectConsumer(this). This allows invoking methods for
// a specific Consumer on the Service business logic.
std::unique_ptr<TracingService::ConsumerEndpoint> service_endpoint;

// After ReadBuffers() is invoked, this binds the async callback used to
// stream trace packets back to the client.
DeferredReadBuffersResponse read_buffers_response;

// After EnableTracing() is invoked, this binds the async callback used to
// send the OnTracingDisabled notification.
DeferredEnableTracingResponse enable_tracing_response;

// After Detach() is invoked, this binds the async callback used to send the
// session id to the consumer.
DeferredDetachResponse detach_response;

// As above, but for the Attach() case.
DeferredAttachResponse attach_response;

// As above, but for GetTraceStats().
DeferredGetTraceStatsResponse get_trace_stats_response;

// After ObserveEvents() is invoked, this binds the async callback used to
// stream ObservableEvents back to the client.
DeferredObserveEventsResponse observe_events_response;
|
|
};
|
|
|
|
// This has to be a container that doesn't invalidate iterators.
|
|
using PendingFlushResponses = std::list<DeferredFlushResponse>;
|
|
using PendingQuerySvcResponses = std::list<DeferredQueryServiceStateResponse>;
|
|
using PendingQueryCapabilitiesResponses =
|
|
std::list<DeferredQueryCapabilitiesResponse>;
|
|
|
|
ConsumerIPCService(const ConsumerIPCService&) = delete;
|
|
ConsumerIPCService& operator=(const ConsumerIPCService&) = delete;
|
|
|
|
// Returns the ConsumerEndpoint in the core business logic that corresponds to
|
|
// the current IPC request.
|
|
RemoteConsumer* GetConsumerForCurrentRequest();
|
|
|
|
void OnFlushCallback(bool success, PendingFlushResponses::iterator);
|
|
void OnQueryServiceCallback(bool success,
|
|
const TracingServiceState&,
|
|
PendingQuerySvcResponses::iterator);
|
|
void OnQueryCapabilitiesCallback(const TracingServiceCapabilities&,
|
|
PendingQueryCapabilitiesResponses::iterator);
|
|
|
|
TracingService* const core_service_;
|
|
|
|
// Maps IPC clients to ConsumerEndpoint instances registered on the
|
|
// |core_service_| business logic.
|
|
std::map<ipc::ClientID, std::unique_ptr<RemoteConsumer>> consumers_;
|
|
|
|
PendingFlushResponses pending_flush_responses_;
|
|
PendingQuerySvcResponses pending_query_service_responses_;
|
|
PendingQueryCapabilitiesResponses pending_query_capabilities_responses_;
|
|
|
|
base::WeakPtrFactory<ConsumerIPCService> weak_ptr_factory_; // Keep last.
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_SERVICE_CONSUMER_IPC_SERVICE_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/consumer_ipc_service.h"
|
|
|
|
#include <inttypes.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_capabilities.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_state.h"
|
|
|
|
namespace perfetto {
|
|
|
|
ConsumerIPCService::ConsumerIPCService(TracingService* core_service)
|
|
: core_service_(core_service), weak_ptr_factory_(this) {}
|
|
|
|
ConsumerIPCService::~ConsumerIPCService() = default;
|
|
|
|
ConsumerIPCService::RemoteConsumer*
|
|
ConsumerIPCService::GetConsumerForCurrentRequest() {
|
|
const ipc::ClientID ipc_client_id = ipc::Service::client_info().client_id();
|
|
const uid_t uid = ipc::Service::client_info().uid();
|
|
PERFETTO_CHECK(ipc_client_id);
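// Lazily create a RemoteConsumer (and connect it to the core service) the
// first time this IPC client issues a request.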
|
|
auto it = consumers_.find(ipc_client_id);
|
|
if (it == consumers_.end()) {
|
|
auto* remote_consumer = new RemoteConsumer();
|
|
consumers_[ipc_client_id].reset(remote_consumer);
|
|
remote_consumer->service_endpoint =
|
|
core_service_->ConnectConsumer(remote_consumer, uid);
|
|
return remote_consumer;
|
|
}
|
|
return it->second.get();
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::OnClientDisconnected() {
|
|
ipc::ClientID client_id = ipc::Service::client_info().client_id();
|
|
consumers_.erase(client_id);
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::EnableTracing(
|
|
const protos::gen::EnableTracingRequest& req,
|
|
DeferredEnableTracingResponse resp) {
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
if (req.attach_notification_only()) {
|
|
remote_consumer->enable_tracing_response = std::move(resp);
|
|
return;
|
|
}
|
|
const TraceConfig& trace_config = req.trace_config();
|
|
base::ScopedFile fd;
|
|
if (trace_config.write_into_file() && trace_config.output_path().empty())
|
|
fd = ipc::Service::TakeReceivedFD();
|
|
remote_consumer->service_endpoint->EnableTracing(trace_config, std::move(fd));
|
|
remote_consumer->enable_tracing_response = std::move(resp);
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::StartTracing(const protos::gen::StartTracingRequest&,
|
|
DeferredStartTracingResponse resp) {
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
remote_consumer->service_endpoint->StartTracing();
|
|
resp.Resolve(ipc::AsyncResult<protos::gen::StartTracingResponse>::Create());
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::ChangeTraceConfig(
|
|
const protos::gen::ChangeTraceConfigRequest& req,
|
|
DeferredChangeTraceConfigResponse resp) {
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
remote_consumer->service_endpoint->ChangeTraceConfig(req.trace_config());
|
|
resp.Resolve(
|
|
ipc::AsyncResult<protos::gen::ChangeTraceConfigResponse>::Create());
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::DisableTracing(
|
|
const protos::gen::DisableTracingRequest&,
|
|
DeferredDisableTracingResponse resp) {
|
|
GetConsumerForCurrentRequest()->service_endpoint->DisableTracing();
|
|
resp.Resolve(ipc::AsyncResult<protos::gen::DisableTracingResponse>::Create());
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::ReadBuffers(const protos::gen::ReadBuffersRequest&,
|
|
DeferredReadBuffersResponse resp) {
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
remote_consumer->read_buffers_response = std::move(resp);
|
|
remote_consumer->service_endpoint->ReadBuffers();
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::FreeBuffers(const protos::gen::FreeBuffersRequest&,
|
|
DeferredFreeBuffersResponse resp) {
|
|
GetConsumerForCurrentRequest()->service_endpoint->FreeBuffers();
|
|
resp.Resolve(ipc::AsyncResult<protos::gen::FreeBuffersResponse>::Create());
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::Flush(const protos::gen::FlushRequest& req,
|
|
DeferredFlushResponse resp) {
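// Park the deferred response in |pending_flush_responses_| (a std::list, so
// the iterator stays valid) until the service runs the flush callback.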
|
|
auto it = pending_flush_responses_.insert(pending_flush_responses_.end(),
|
|
std::move(resp));
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
auto callback = [weak_this, it](bool success) {
|
|
if (weak_this)
|
|
weak_this->OnFlushCallback(success, std::move(it));
|
|
};
|
|
GetConsumerForCurrentRequest()->service_endpoint->Flush(req.timeout_ms(),
|
|
std::move(callback));
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::Detach(const protos::gen::DetachRequest& req,
|
|
DeferredDetachResponse resp) {
|
|
// OnDetach() will resolve the |detach_response|.
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
remote_consumer->detach_response = std::move(resp);
|
|
remote_consumer->service_endpoint->Detach(req.key());
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::Attach(const protos::gen::AttachRequest& req,
|
|
DeferredAttachResponse resp) {
|
|
// OnAttach() will resolve the |attach_response|.
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
remote_consumer->attach_response = std::move(resp);
|
|
remote_consumer->service_endpoint->Attach(req.key());
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::GetTraceStats(const protos::gen::GetTraceStatsRequest&,
|
|
DeferredGetTraceStatsResponse resp) {
|
|
// OnTraceStats() will resolve the |get_trace_stats_response|.
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
remote_consumer->get_trace_stats_response = std::move(resp);
|
|
remote_consumer->service_endpoint->GetTraceStats();
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::ObserveEvents(
|
|
const protos::gen::ObserveEventsRequest& req,
|
|
DeferredObserveEventsResponse resp) {
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
|
|
// If there's a prior stream, close it so that the client can clean it up.
|
|
remote_consumer->CloseObserveEventsResponseStream();
|
|
|
|
remote_consumer->observe_events_response = std::move(resp);
|
|
|
|
uint32_t events_mask = 0;
|
|
for (const auto& type : req.events_to_observe()) {
|
|
events_mask |= static_cast<uint32_t>(type);
|
|
}
|
|
remote_consumer->service_endpoint->ObserveEvents(events_mask);
|
|
|
|
// If no events are to be observed, close the stream immediately so that the
|
|
// client can clean up.
|
|
if (events_mask == 0)
|
|
remote_consumer->CloseObserveEventsResponseStream();
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::QueryServiceState(
|
|
const protos::gen::QueryServiceStateRequest&,
|
|
DeferredQueryServiceStateResponse resp) {
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
auto it = pending_query_service_responses_.insert(
|
|
pending_query_service_responses_.end(), std::move(resp));
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
auto callback = [weak_this, it](bool success,
|
|
const TracingServiceState& svc_state) {
|
|
if (weak_this)
|
|
weak_this->OnQueryServiceCallback(success, svc_state, std::move(it));
|
|
};
|
|
remote_consumer->service_endpoint->QueryServiceState(callback);
|
|
}
|
|
|
|
// Called by the service in response to service_endpoint->QueryServiceState().
|
|
void ConsumerIPCService::OnQueryServiceCallback(
|
|
bool success,
|
|
const TracingServiceState& svc_state,
|
|
PendingQuerySvcResponses::iterator pending_response_it) {
|
|
DeferredQueryServiceStateResponse response(std::move(*pending_response_it));
|
|
pending_query_service_responses_.erase(pending_response_it);
|
|
if (!success) {
|
|
response.Reject();
|
|
return;
|
|
}
|
|
|
|
// The TracingServiceState object might be too big to fit into a single IPC
|
|
// message because it contains the DataSourceDescriptor of each data source.
|
|
// Here we split it into chunks that fit in the IPC limit, observing the
// following rule: each chunk must individually be a valid TracingServiceState
|
|
// message; all the chunks concatenated together must form the original
|
|
// message. This is to deal with the legacy API that was just sending one
|
|
// whole message (failing in presence of too many data sources, b/153142114).
|
|
// The message is split as follows: we take the whole TracingServiceState,
|
|
// take out the data sources section (which is a top-level repeated field)
|
|
// and re-add them one-by-one. If, in the process of appending, the IPC msg
|
|
// size is reached, a new chunk is created. This assumes that the rest of
|
|
// TracingServiceState fits in one IPC message and each DataSourceDescriptor
|
|
// fits in the worst case in a dedicated message (which is true, because
|
|
// otherwise the RegisterDataSource() which passes the descriptor in the first
|
|
// place would fail).
|
|
|
|
std::vector<uint8_t> chunked_reply;
|
|
|
|
// Transmits the current chunk and starts a new one.
|
|
bool sent_eof = false;
|
|
auto send_chunked_reply = [&chunked_reply, &response,
|
|
&sent_eof](bool has_more) {
|
|
PERFETTO_CHECK(!sent_eof);
|
|
sent_eof = !has_more;
|
|
auto resp =
|
|
ipc::AsyncResult<protos::gen::QueryServiceStateResponse>::Create();
|
|
resp.set_has_more(has_more);
|
|
PERFETTO_CHECK(resp->mutable_service_state()->ParseFromArray(
|
|
chunked_reply.data(), chunked_reply.size()));
|
|
chunked_reply.clear();
|
|
response.Resolve(std::move(resp));
|
|
};
|
|
|
|
// Create a copy of the whole response and cut away the data_sources section.
|
|
protos::gen::TracingServiceState svc_state_copy = svc_state;
|
|
auto data_sources = std::move(*svc_state_copy.mutable_data_sources());
|
|
chunked_reply = svc_state_copy.SerializeAsArray();
|
|
|
|
// Now re-add them fitting within the IPC message limits (- some margin for
|
|
// the outer IPC frame).
|
|
constexpr size_t kMaxMsgSize = ipc::kIPCBufferSize - 128;
|
|
for (const auto& data_source : data_sources) {
|
|
protos::gen::TracingServiceState tmp;
|
|
tmp.mutable_data_sources()->emplace_back(std::move(data_source));
|
|
std::vector<uint8_t> chunk = tmp.SerializeAsArray();
|
|
if (chunked_reply.size() + chunk.size() < kMaxMsgSize) {
|
|
chunked_reply.insert(chunked_reply.end(), chunk.begin(), chunk.end());
|
|
} else {
|
|
send_chunked_reply(/*has_more=*/true);
|
|
chunked_reply = std::move(chunk);
|
|
}
|
|
}
|
|
|
|
PERFETTO_DCHECK(!chunked_reply.empty());
|
|
send_chunked_reply(/*has_more=*/false);
|
|
PERFETTO_CHECK(sent_eof);
|
|
}
|
|
|
|
// Called by the service in response to a service_endpoint->Flush() request.
|
|
void ConsumerIPCService::OnFlushCallback(
|
|
bool success,
|
|
PendingFlushResponses::iterator pending_response_it) {
|
|
DeferredFlushResponse response(std::move(*pending_response_it));
|
|
pending_flush_responses_.erase(pending_response_it);
|
|
if (success) {
|
|
response.Resolve(ipc::AsyncResult<protos::gen::FlushResponse>::Create());
|
|
} else {
|
|
response.Reject();
|
|
}
|
|
}
|
|
|
|
void ConsumerIPCService::QueryCapabilities(
|
|
const protos::gen::QueryCapabilitiesRequest&,
|
|
DeferredQueryCapabilitiesResponse resp) {
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
auto it = pending_query_capabilities_responses_.insert(
|
|
pending_query_capabilities_responses_.end(), std::move(resp));
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
auto callback = [weak_this, it](const TracingServiceCapabilities& caps) {
|
|
if (weak_this)
|
|
weak_this->OnQueryCapabilitiesCallback(caps, std::move(it));
|
|
};
|
|
remote_consumer->service_endpoint->QueryCapabilities(callback);
|
|
}
|
|
|
|
// Called by the service in response to service_endpoint->QueryCapabilities().
|
|
void ConsumerIPCService::OnQueryCapabilitiesCallback(
|
|
const TracingServiceCapabilities& caps,
|
|
PendingQueryCapabilitiesResponses::iterator pending_response_it) {
|
|
DeferredQueryCapabilitiesResponse response(std::move(*pending_response_it));
|
|
pending_query_capabilities_responses_.erase(pending_response_it);
|
|
auto resp =
|
|
ipc::AsyncResult<protos::gen::QueryCapabilitiesResponse>::Create();
|
|
*resp->mutable_capabilities() = caps;
|
|
response.Resolve(std::move(resp));
|
|
}
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
// RemoteConsumer methods
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
ConsumerIPCService::RemoteConsumer::RemoteConsumer() = default;
|
|
ConsumerIPCService::RemoteConsumer::~RemoteConsumer() = default;
|
|
|
|
// Invoked by the |core_service_| business logic after the ConnectConsumer()
// call. There is nothing to do here; we expect ConnectConsumer() to just work
// in the local case.
|
|
void ConsumerIPCService::RemoteConsumer::OnConnect() {}
|
|
|
|
// Invoked by the |core_service_| business logic after we destroy the
|
|
// |service_endpoint| (in the RemoteConsumer dtor).
|
|
void ConsumerIPCService::RemoteConsumer::OnDisconnect() {}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnTracingDisabled() {
|
|
if (enable_tracing_response.IsBound()) {
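// Resolving the still-bound EnableTracing streaming response with
// |disabled| == true is how the "tracing disabled" notification reaches the
// remote consumer.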
|
|
auto result =
|
|
ipc::AsyncResult<protos::gen::EnableTracingResponse>::Create();
|
|
result->set_disabled(true);
|
|
enable_tracing_response.Resolve(std::move(result));
|
|
}
|
|
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnTraceData(
|
|
std::vector<TracePacket> trace_packets,
|
|
bool has_more) {
|
|
if (!read_buffers_response.IsBound())
|
|
return;
|
|
|
|
auto result = ipc::AsyncResult<protos::gen::ReadBuffersResponse>::Create();
|
|
|
|
// A TracePacket might be too big to fit into a single IPC message (max
|
|
// kIPCBufferSize). However a TracePacket is made of slices and each slice
|
|
// is way smaller than kIPCBufferSize (a slice size is effectively bounded by
|
|
// the max chunk size of the SharedMemoryABI). When sending a TracePacket,
|
|
// if its slices don't fit within one IPC, chunk them over several contiguous
|
|
// IPCs using the |last_slice_for_packet| flag for gluing on the other side.
|
|
static_assert(ipc::kIPCBufferSize >= SharedMemoryABI::kMaxPageSize * 2,
|
|
"kIPCBufferSize too small given the max possible slice size");
|
|
|
|
auto send_ipc_reply = [this, &result](bool more) {
|
|
result.set_has_more(more);
|
|
read_buffers_response.Resolve(std::move(result));
|
|
result = ipc::AsyncResult<protos::gen::ReadBuffersResponse>::Create();
|
|
};
|
|
|
|
size_t approx_reply_size = 0;
|
|
for (const TracePacket& trace_packet : trace_packets) {
|
|
size_t num_slices_left_for_packet = trace_packet.slices().size();
|
|
for (const Slice& slice : trace_packet.slices()) {
|
|
// Check if this slice would cause the IPC to overflow its max size and,
|
|
// if that is the case, split the IPCs. The "16" and "64" below are
|
|
// over-estimations of, respectively:
|
|
// 16: the preamble that prefixes each slice (there are 2 x size fields
|
|
// in the proto + the |last_slice_for_packet| bool).
|
|
// 64: the overhead of the IPC InvokeMethodReply + wire_protocol's frame.
|
|
// If these estimations are wrong, BufferedFrameDeserializer::Serialize()
|
|
// will hit a DCHECK anyways.
|
|
const size_t approx_slice_size = slice.size + 16;
|
|
if (approx_reply_size + approx_slice_size > ipc::kIPCBufferSize - 64) {
|
|
// If we hit this CHECK we got a single slice that is > kIPCBufferSize.
|
|
PERFETTO_CHECK(result->slices_size() > 0);
|
|
send_ipc_reply(/*has_more=*/true);
|
|
approx_reply_size = 0;
|
|
}
|
|
approx_reply_size += approx_slice_size;
|
|
|
|
auto* res_slice = result->add_slices();
|
|
res_slice->set_last_slice_for_packet(--num_slices_left_for_packet == 0);
|
|
res_slice->set_data(slice.start, slice.size);
|
|
}
|
|
}
|
|
send_ipc_reply(has_more);
|
|
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnDetach(bool success) {
|
|
if (!success) {
|
|
std::move(detach_response).Reject();
|
|
return;
|
|
}
|
|
auto resp = ipc::AsyncResult<protos::gen::DetachResponse>::Create();
|
|
std::move(detach_response).Resolve(std::move(resp));
|
|
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnAttach(
|
|
bool success,
|
|
const TraceConfig& trace_config) {
|
|
if (!success) {
|
|
std::move(attach_response).Reject();
|
|
return;
|
|
}
|
|
auto response = ipc::AsyncResult<protos::gen::AttachResponse>::Create();
|
|
*response->mutable_trace_config() = trace_config;
|
|
std::move(attach_response).Resolve(std::move(response));
|
|
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnTraceStats(bool success,
|
|
const TraceStats& stats) {
|
|
if (!success) {
|
|
std::move(get_trace_stats_response).Reject();
|
|
return;
|
|
}
|
|
auto response =
|
|
ipc::AsyncResult<protos::gen::GetTraceStatsResponse>::Create();
|
|
*response->mutable_trace_stats() = stats;
|
|
std::move(get_trace_stats_response).Resolve(std::move(response));
|
|
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnObservableEvents(
|
|
const ObservableEvents& events) {
|
|
if (!observe_events_response.IsBound())
|
|
return;
|
|
|
|
auto result = ipc::AsyncResult<protos::gen::ObserveEventsResponse>::Create();
|
|
result.set_has_more(true);
|
|
*result->mutable_events() = events;
|
|
observe_events_response.Resolve(std::move(result));
|
|
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::CloseObserveEventsResponseStream() {
|
|
if (!observe_events_response.IsBound())
|
|
return;
|
|
|
|
auto result = ipc::AsyncResult<protos::gen::ObserveEventsResponse>::Create();
|
|
result.set_has_more(false);
|
|
observe_events_response.Resolve(std::move(result));
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/service/producer_ipc_service.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/service/producer_ipc_service.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_SERVICE_PRODUCER_IPC_SERVICE_H_
|
|
#define SRC_TRACING_IPC_SERVICE_PRODUCER_IPC_SERVICE_H_
|
|
|
|
#include <list>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.ipc.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace ipc {
|
|
class Host;
|
|
} // namespace ipc
|
|
|
|
// Implements the Producer port of the IPC service. This class proxies requests
// and responses between the core service logic (|svc_|) and remote Producer(s)
// on the IPC socket, through the methods overridden from ProducerPort.
|
|
class ProducerIPCService : public protos::gen::ProducerPort {
|
|
public:
|
|
explicit ProducerIPCService(TracingService* core_service);
|
|
~ProducerIPCService() override;
|
|
|
|
// ProducerPort implementation (from .proto IPC definition).
|
|
void InitializeConnection(const protos::gen::InitializeConnectionRequest&,
|
|
DeferredInitializeConnectionResponse) override;
|
|
void RegisterDataSource(const protos::gen::RegisterDataSourceRequest&,
|
|
DeferredRegisterDataSourceResponse) override;
|
|
void UnregisterDataSource(const protos::gen::UnregisterDataSourceRequest&,
|
|
DeferredUnregisterDataSourceResponse) override;
|
|
void RegisterTraceWriter(const protos::gen::RegisterTraceWriterRequest&,
|
|
DeferredRegisterTraceWriterResponse) override;
|
|
void UnregisterTraceWriter(const protos::gen::UnregisterTraceWriterRequest&,
|
|
DeferredUnregisterTraceWriterResponse) override;
|
|
void CommitData(const protos::gen::CommitDataRequest&,
|
|
DeferredCommitDataResponse) override;
|
|
void NotifyDataSourceStarted(
|
|
const protos::gen::NotifyDataSourceStartedRequest&,
|
|
DeferredNotifyDataSourceStartedResponse) override;
|
|
void NotifyDataSourceStopped(
|
|
const protos::gen::NotifyDataSourceStoppedRequest&,
|
|
DeferredNotifyDataSourceStoppedResponse) override;
|
|
void ActivateTriggers(const protos::gen::ActivateTriggersRequest&,
|
|
DeferredActivateTriggersResponse) override;
|
|
|
|
void GetAsyncCommand(const protos::gen::GetAsyncCommandRequest&,
|
|
DeferredGetAsyncCommandResponse) override;
|
|
void Sync(const protos::gen::SyncRequest&, DeferredSyncResponse) override;
|
|
void OnClientDisconnected() override;
|
|
|
|
private:
|
|
// Acts like a Producer with the core Service business logic (which doesn't
// know anything about the remote transport); all it does is proxy methods
// to the remote Producer on the other side of the IPC channel.
|
|
class RemoteProducer : public Producer {
|
|
public:
|
|
RemoteProducer();
|
|
~RemoteProducer() override;
|
|
|
|
// These methods are called by the |core_service_| business logic. There is
// no connection here; these methods are posted straight away.
|
|
void OnConnect() override;
|
|
void OnDisconnect() override;
|
|
void SetupDataSource(DataSourceInstanceID,
|
|
const DataSourceConfig&) override;
|
|
void StartDataSource(DataSourceInstanceID,
|
|
const DataSourceConfig&) override;
|
|
void StopDataSource(DataSourceInstanceID) override;
|
|
void OnTracingSetup() override;
|
|
void Flush(FlushRequestID,
|
|
const DataSourceInstanceID* data_source_ids,
|
|
size_t num_data_sources) override;
|
|
|
|
void ClearIncrementalState(const DataSourceInstanceID* data_source_ids,
|
|
size_t num_data_sources) override;
|
|
|
|
void SendSetupTracing();
|
|
|
|
// The interface obtained from the core service business logic through
// Service::ConnectProducer(this). This allows invoking methods for a
// specific Producer on the Service business logic.
std::unique_ptr<TracingService::ProducerEndpoint> service_endpoint;
|
|
|
|
// The back-channel (based on a never-ending stream request) that allows us
// to send asynchronous commands to the remote Producer (e.g. start/stop a
// data source).
|
|
DeferredGetAsyncCommandResponse async_producer_commands;
|
|
|
|
// Set if the service calls OnTracingSetup() before the
|
|
// |async_producer_commands| was bound by the service. In this case, we
|
|
// forward the SetupTracing command when it is bound later.
|
|
bool send_setup_tracing_on_async_commands_bound = false;
|
|
};
|
|
|
|
ProducerIPCService(const ProducerIPCService&) = delete;
|
|
ProducerIPCService& operator=(const ProducerIPCService&) = delete;
|
|
|
|
// Returns the ProducerEndpoint in the core business logic that corresponds to
|
|
// the current IPC request.
|
|
RemoteProducer* GetProducerForCurrentRequest();
|
|
|
|
TracingService* const core_service_;
|
|
|
|
// Maps IPC clients to ProducerEndpoint instances registered on the
|
|
// |core_service_| business logic.
|
|
std::map<ipc::ClientID, std::unique_ptr<RemoteProducer>> producers_;
|
|
|
|
// List because pointers need to be stable.
|
|
std::list<DeferredSyncResponse> pending_syncs_;
|
|
|
|
base::WeakPtrFactory<ProducerIPCService> weak_ptr_factory_; // Keep last.
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_SERVICE_PRODUCER_IPC_SERVICE_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/producer_ipc_service.h"
|
|
|
|
#include <inttypes.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
|
|
|
|
// The remote Producer(s) are not trusted. All the methods from the ProducerPort
|
|
// IPC layer (e.g. RegisterDataSource()) must assume that the remote Producer is
|
|
// compromised.
|
|
|
|
namespace perfetto {
|
|
|
|
ProducerIPCService::ProducerIPCService(TracingService* core_service)
|
|
: core_service_(core_service), weak_ptr_factory_(this) {}
|
|
|
|
ProducerIPCService::~ProducerIPCService() = default;
|
|
|
|
ProducerIPCService::RemoteProducer*
|
|
ProducerIPCService::GetProducerForCurrentRequest() {
|
|
const ipc::ClientID ipc_client_id = ipc::Service::client_info().client_id();
|
|
PERFETTO_CHECK(ipc_client_id);
|
|
auto it = producers_.find(ipc_client_id);
|
|
if (it == producers_.end())
|
|
return nullptr;
|
|
return it->second.get();
|
|
}
|
|
|
|
// Called by the remote Producer through the IPC channel soon after connecting.
|
|
void ProducerIPCService::InitializeConnection(
|
|
const protos::gen::InitializeConnectionRequest& req,
|
|
DeferredInitializeConnectionResponse response) {
|
|
const auto& client_info = ipc::Service::client_info();
|
|
const ipc::ClientID ipc_client_id = client_info.client_id();
|
|
PERFETTO_CHECK(ipc_client_id);
|
|
|
|
if (producers_.count(ipc_client_id) > 0) {
|
|
PERFETTO_DLOG(
|
|
"The remote Producer is trying to re-initialize the connection");
|
|
return response.Reject();
|
|
}
|
|
|
|
// Create a new entry.
|
|
std::unique_ptr<RemoteProducer> producer(new RemoteProducer());
|
|
|
|
TracingService::ProducerSMBScrapingMode smb_scraping_mode =
|
|
TracingService::ProducerSMBScrapingMode::kDefault;
|
|
switch (req.smb_scraping_mode()) {
|
|
case protos::gen::InitializeConnectionRequest::SMB_SCRAPING_UNSPECIFIED:
|
|
break;
|
|
case protos::gen::InitializeConnectionRequest::SMB_SCRAPING_DISABLED:
|
|
smb_scraping_mode = TracingService::ProducerSMBScrapingMode::kDisabled;
|
|
break;
|
|
case protos::gen::InitializeConnectionRequest::SMB_SCRAPING_ENABLED:
|
|
smb_scraping_mode = TracingService::ProducerSMBScrapingMode::kEnabled;
|
|
break;
|
|
}
|
|
|
|
bool dcheck_mismatch = false;
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
dcheck_mismatch =
|
|
req.build_flags() ==
|
|
protos::gen::InitializeConnectionRequest::BUILD_FLAGS_DCHECKS_OFF;
|
|
#else
|
|
dcheck_mismatch =
|
|
req.build_flags() ==
|
|
protos::gen::InitializeConnectionRequest::BUILD_FLAGS_DCHECKS_ON;
|
|
#endif
|
|
if (dcheck_mismatch) {
|
|
PERFETTO_LOG(
|
|
"The producer and the service binaries are built using different "
|
|
"DEBUG/NDEBUG flags. This will likely cause crashes.");
|
|
}
|
|
|
|
// If the producer provided an SMB, tell the service to attempt to adopt it.
|
|
std::unique_ptr<SharedMemory> shmem;
|
|
if (req.producer_provided_shmem()) {
|
|
base::ScopedFile shmem_fd = ipc::Service::TakeReceivedFD();
|
|
if (shmem_fd) {
|
|
shmem = PosixSharedMemory::AttachToFd(
|
|
std::move(shmem_fd), /*require_seals_if_supported=*/true);
|
|
if (!shmem) {
|
|
PERFETTO_ELOG(
|
|
"Couldn't map producer-provided SMB, falling back to "
|
|
"service-provided SMB");
|
|
}
|
|
} else {
|
|
PERFETTO_DLOG(
|
|
"InitializeConnectionRequest's producer_provided_shmem flag is set "
|
|
"but the producer didn't provide an FD");
|
|
}
|
|
}
|
|
|
|
// ConnectProducer will call OnConnect() on the next task.
|
|
producer->service_endpoint = core_service_->ConnectProducer(
|
|
producer.get(), client_info.uid(), req.producer_name(),
|
|
req.shared_memory_size_hint_bytes(),
|
|
/*in_process=*/false, smb_scraping_mode,
|
|
req.shared_memory_page_size_hint_bytes(), std::move(shmem));
|
|
|
|
// Could happen if the service has too many producers connected.
|
|
if (!producer->service_endpoint) {
|
|
response.Reject();
|
|
return;
|
|
}
|
|
|
|
bool using_producer_shmem =
|
|
producer->service_endpoint->IsShmemProvidedByProducer();
|
|
|
|
producers_.emplace(ipc_client_id, std::move(producer));
|
|
  // Because of the std::move(), |producer| is invalid after this point.
|
|
|
|
auto async_res =
|
|
ipc::AsyncResult<protos::gen::InitializeConnectionResponse>::Create();
|
|
async_res->set_using_shmem_provided_by_producer(using_producer_shmem);
|
|
response.Resolve(std::move(async_res));
|
|
}
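
// For reference, the remote producer builds the InitializeConnectionRequest
// handled above roughly as follows. This is a sketch that assumes the
// generated setters mirror the getters used above; the concrete values are
// illustrative only.
//
//   protos::gen::InitializeConnectionRequest req;
//   req.set_producer_name("com.example.my_producer");
//   req.set_shared_memory_size_hint_bytes(1024 * 1024);
//   req.set_shared_memory_page_size_hint_bytes(4096);
//   req.set_smb_scraping_mode(
//       protos::gen::InitializeConnectionRequest::SMB_SCRAPING_ENABLED);
//   // Only when the producer maps and sends its own SMB over the socket:
//   // req.set_producer_provided_shmem(true);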
|
|
|
|
// Called by the remote Producer through the IPC channel.
|
|
void ProducerIPCService::RegisterDataSource(
|
|
const protos::gen::RegisterDataSourceRequest& req,
|
|
DeferredRegisterDataSourceResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked RegisterDataSource() before InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
|
|
const DataSourceDescriptor& dsd = req.data_source_descriptor();
|
|
  producer->service_endpoint->RegisterDataSource(dsd);
|
|
|
|
// RegisterDataSource doesn't expect any meaningful response.
|
|
if (response.IsBound()) {
|
|
response.Resolve(
|
|
ipc::AsyncResult<protos::gen::RegisterDataSourceResponse>::Create());
|
|
}
|
|
}
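
// On the producer side, a registration handled by the method above typically
// looks like the following sketch (the data source name is made up and
// |endpoint| stands for the producer's connected service endpoint):
//
//   DataSourceDescriptor dsd;
//   dsd.set_name("com.example.my_data_source");
//   endpoint->RegisterDataSource(dsd);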
|
|
|
|
// Called by the IPC layer.
|
|
void ProducerIPCService::OnClientDisconnected() {
|
|
ipc::ClientID client_id = ipc::Service::client_info().client_id();
|
|
PERFETTO_DLOG("Client %" PRIu64 " disconnected", client_id);
|
|
producers_.erase(client_id);
|
|
}
|
|
|
|
// TODO(fmayer): test what happens if we receive the following tasks, in order:
// RegisterDataSource, UnregisterDataSource, OnDataSourceRegistered,
// which essentially means that the client posted back-to-back a
// RegisterDataSource and an UnregisterDataSource, speculating on the next id.
|
|
// Called by the remote Producer through the IPC channel.
|
|
void ProducerIPCService::UnregisterDataSource(
|
|
const protos::gen::UnregisterDataSourceRequest& req,
|
|
DeferredUnregisterDataSourceResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked UnregisterDataSource() before "
|
|
"InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
producer->service_endpoint->UnregisterDataSource(req.data_source_name());
|
|
|
|
// UnregisterDataSource doesn't expect any meaningful response.
|
|
if (response.IsBound()) {
|
|
response.Resolve(
|
|
ipc::AsyncResult<protos::gen::UnregisterDataSourceResponse>::Create());
|
|
}
|
|
}
|
|
|
|
void ProducerIPCService::RegisterTraceWriter(
|
|
const protos::gen::RegisterTraceWriterRequest& req,
|
|
DeferredRegisterTraceWriterResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked RegisterTraceWriter() before "
|
|
"InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
producer->service_endpoint->RegisterTraceWriter(req.trace_writer_id(),
|
|
req.target_buffer());
|
|
|
|
// RegisterTraceWriter doesn't expect any meaningful response.
|
|
if (response.IsBound()) {
|
|
response.Resolve(
|
|
ipc::AsyncResult<protos::gen::RegisterTraceWriterResponse>::Create());
|
|
}
|
|
}
|
|
|
|
void ProducerIPCService::UnregisterTraceWriter(
|
|
const protos::gen::UnregisterTraceWriterRequest& req,
|
|
DeferredUnregisterTraceWriterResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked UnregisterTraceWriter() before "
|
|
"InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
producer->service_endpoint->UnregisterTraceWriter(req.trace_writer_id());
|
|
|
|
// UnregisterTraceWriter doesn't expect any meaningful response.
|
|
if (response.IsBound()) {
|
|
response.Resolve(
|
|
ipc::AsyncResult<protos::gen::UnregisterTraceWriterResponse>::Create());
|
|
}
|
|
}
|
|
|
|
void ProducerIPCService::CommitData(const protos::gen::CommitDataRequest& req,
|
|
DeferredCommitDataResponse resp) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked CommitData() before InitializeConnection()");
|
|
if (resp.IsBound())
|
|
resp.Reject();
|
|
return;
|
|
}
|
|
|
|
// We don't want to send a response if the client didn't attach a callback to
|
|
// the original request. Doing so would generate unnecessary wakeups and
|
|
// context switches.
|
|
std::function<void()> callback;
|
|
if (resp.IsBound()) {
|
|
    // Capturing |resp| by reference here speculates on the fact that
    // CommitData() in tracing_service_impl.cc invokes the passed callback
    // inline, without posting it. If that assumption changes, this code needs
    // to wrap the response in a shared_ptr (C++11 lambdas don't support move
    // capture) and use a weak ptr in the caller.
|
|
callback = [&resp] {
|
|
resp.Resolve(ipc::AsyncResult<protos::gen::CommitDataResponse>::Create());
|
|
};
|
|
}
|
|
producer->service_endpoint->CommitData(req, callback);
|
|
}
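
// If the inline-invocation assumption documented above ever changes, the
// deferred response would have to outlive this stack frame. A hypothetical
// sketch of that variant (not needed today):
//
//   auto shared_resp =
//       std::make_shared<DeferredCommitDataResponse>(std::move(resp));
//   auto weak_this = weak_ptr_factory_.GetWeakPtr();
//   callback = [weak_this, shared_resp] {
//     if (!weak_this)
//       return;
//     shared_resp->Resolve(
//         ipc::AsyncResult<protos::gen::CommitDataResponse>::Create());
//   };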
|
|
|
|
void ProducerIPCService::NotifyDataSourceStarted(
|
|
const protos::gen::NotifyDataSourceStartedRequest& request,
|
|
DeferredNotifyDataSourceStartedResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked NotifyDataSourceStarted() before "
|
|
"InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
producer->service_endpoint->NotifyDataSourceStarted(request.data_source_id());
|
|
|
|
  // NotifyDataSourceStarted shouldn't expect any meaningful response, avoid
  // a useless IPC in that case.
|
|
if (response.IsBound()) {
|
|
response.Resolve(ipc::AsyncResult<
|
|
protos::gen::NotifyDataSourceStartedResponse>::Create());
|
|
}
|
|
}
|
|
|
|
void ProducerIPCService::NotifyDataSourceStopped(
|
|
const protos::gen::NotifyDataSourceStoppedRequest& request,
|
|
DeferredNotifyDataSourceStoppedResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked NotifyDataSourceStopped() before "
|
|
"InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
producer->service_endpoint->NotifyDataSourceStopped(request.data_source_id());
|
|
|
|
// NotifyDataSourceStopped shouldn't expect any meaningful response, avoid
|
|
// a useless IPC in that case.
|
|
if (response.IsBound()) {
|
|
response.Resolve(ipc::AsyncResult<
|
|
protos::gen::NotifyDataSourceStoppedResponse>::Create());
|
|
}
|
|
}
|
|
|
|
void ProducerIPCService::ActivateTriggers(
|
|
const protos::gen::ActivateTriggersRequest& proto_req,
|
|
DeferredActivateTriggersResponse resp) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked ActivateTriggers() before InitializeConnection()");
|
|
if (resp.IsBound())
|
|
resp.Reject();
|
|
return;
|
|
}
|
|
std::vector<std::string> triggers;
|
|
for (const auto& name : proto_req.trigger_names()) {
|
|
triggers.push_back(name);
|
|
}
|
|
producer->service_endpoint->ActivateTriggers(triggers);
|
|
// ActivateTriggers shouldn't expect any meaningful response, avoid
|
|
// a useless IPC in that case.
|
|
if (resp.IsBound()) {
|
|
resp.Resolve(
|
|
ipc::AsyncResult<protos::gen::ActivateTriggersResponse>::Create());
|
|
}
|
|
}
|
|
|
|
void ProducerIPCService::GetAsyncCommand(
|
|
const protos::gen::GetAsyncCommandRequest&,
|
|
DeferredGetAsyncCommandResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked GetAsyncCommand() before "
|
|
"InitializeConnection()");
|
|
return response.Reject();
|
|
}
|
|
// Keep the back channel open, without ever resolving the ipc::Deferred fully,
|
|
// to send async commands to the RemoteProducer (e.g., starting/stopping a
|
|
// data source).
|
|
producer->async_producer_commands = std::move(response);
|
|
|
|
// Service may already have issued the OnTracingSetup() event, in which case
|
|
// we should forward it to the producer now.
|
|
if (producer->send_setup_tracing_on_async_commands_bound)
|
|
producer->SendSetupTracing();
|
|
}
|
|
|
|
void ProducerIPCService::Sync(const protos::gen::SyncRequest&,
|
|
DeferredSyncResponse resp) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG("Producer invoked Sync() before InitializeConnection()");
|
|
return resp.Reject();
|
|
}
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
auto resp_it = pending_syncs_.insert(pending_syncs_.end(), std::move(resp));
|
|
auto callback = [weak_this, resp_it]() {
|
|
if (!weak_this)
|
|
return;
|
|
auto pending_resp = std::move(*resp_it);
|
|
weak_this->pending_syncs_.erase(resp_it);
|
|
pending_resp.Resolve(ipc::AsyncResult<protos::gen::SyncResponse>::Create());
|
|
};
|
|
producer->service_endpoint->Sync(callback);
|
|
}
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
// RemoteProducer methods
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
ProducerIPCService::RemoteProducer::RemoteProducer() = default;
|
|
ProducerIPCService::RemoteProducer::~RemoteProducer() = default;
|
|
|
|
// Invoked by the |core_service_| business logic after the ConnectProducer()
// call. There is nothing to do here: we expect ConnectProducer() to just work
// in the local case.
|
|
void ProducerIPCService::RemoteProducer::OnConnect() {}
|
|
|
|
// Invoked by the |core_service_| business logic after we destroy the
|
|
// |service_endpoint| (in the RemoteProducer dtor).
|
|
void ProducerIPCService::RemoteProducer::OnDisconnect() {}
|
|
|
|
// Invoked by the |core_service_| business logic when it wants to create a new
|
|
// data source.
|
|
void ProducerIPCService::RemoteProducer::SetupDataSource(
|
|
DataSourceInstanceID dsid,
|
|
const DataSourceConfig& cfg) {
|
|
if (!async_producer_commands.IsBound()) {
|
|
PERFETTO_DLOG(
|
|
"The Service tried to create a new data source but the remote Producer "
|
|
"has not yet initialized the connection");
|
|
return;
|
|
}
|
|
auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
|
|
cmd.set_has_more(true);
|
|
cmd->mutable_setup_data_source()->set_new_instance_id(dsid);
|
|
*cmd->mutable_setup_data_source()->mutable_config() = cfg;
|
|
async_producer_commands.Resolve(std::move(cmd));
|
|
}
|
|
|
|
// Invoked by the |core_service_| business logic when it wants to start a new
|
|
// data source.
|
|
void ProducerIPCService::RemoteProducer::StartDataSource(
|
|
DataSourceInstanceID dsid,
|
|
const DataSourceConfig& cfg) {
|
|
if (!async_producer_commands.IsBound()) {
|
|
PERFETTO_DLOG(
|
|
"The Service tried to start a new data source but the remote Producer "
|
|
"has not yet initialized the connection");
|
|
return;
|
|
}
|
|
auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
|
|
cmd.set_has_more(true);
|
|
cmd->mutable_start_data_source()->set_new_instance_id(dsid);
|
|
*cmd->mutable_start_data_source()->mutable_config() = cfg;
|
|
async_producer_commands.Resolve(std::move(cmd));
|
|
}
|
|
|
|
void ProducerIPCService::RemoteProducer::StopDataSource(
|
|
DataSourceInstanceID dsid) {
|
|
if (!async_producer_commands.IsBound()) {
|
|
PERFETTO_DLOG(
|
|
"The Service tried to stop a data source but the remote Producer "
|
|
"has not yet initialized the connection");
|
|
return;
|
|
}
|
|
auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
|
|
cmd.set_has_more(true);
|
|
cmd->mutable_stop_data_source()->set_instance_id(dsid);
|
|
async_producer_commands.Resolve(std::move(cmd));
|
|
}
|
|
|
|
void ProducerIPCService::RemoteProducer::OnTracingSetup() {
|
|
if (!async_producer_commands.IsBound()) {
|
|
// Service may call this before the producer issued GetAsyncCommand.
|
|
send_setup_tracing_on_async_commands_bound = true;
|
|
return;
|
|
}
|
|
SendSetupTracing();
|
|
}
|
|
|
|
void ProducerIPCService::RemoteProducer::SendSetupTracing() {
|
|
PERFETTO_CHECK(async_producer_commands.IsBound());
|
|
PERFETTO_CHECK(service_endpoint->shared_memory());
|
|
auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
|
|
cmd.set_has_more(true);
|
|
auto setup_tracing = cmd->mutable_setup_tracing();
|
|
if (!service_endpoint->IsShmemProvidedByProducer()) {
|
|
    // Nominal case (i.e. everything but Chrome): the service provides the SMB.
|
|
setup_tracing->set_shared_buffer_page_size_kb(
|
|
static_cast<uint32_t>(service_endpoint->shared_buffer_page_size_kb()));
|
|
const int shm_fd =
|
|
static_cast<PosixSharedMemory*>(service_endpoint->shared_memory())
|
|
->fd();
|
|
cmd.set_fd(shm_fd);
|
|
}
|
|
async_producer_commands.Resolve(std::move(cmd));
|
|
}
|
|
|
|
void ProducerIPCService::RemoteProducer::Flush(
|
|
FlushRequestID flush_request_id,
|
|
const DataSourceInstanceID* data_source_ids,
|
|
size_t num_data_sources) {
|
|
if (!async_producer_commands.IsBound()) {
|
|
PERFETTO_DLOG(
|
|
"The Service tried to request a flush but the remote Producer has not "
|
|
"yet initialized the connection");
|
|
return;
|
|
}
|
|
auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
|
|
cmd.set_has_more(true);
|
|
for (size_t i = 0; i < num_data_sources; i++)
|
|
cmd->mutable_flush()->add_data_source_ids(data_source_ids[i]);
|
|
cmd->mutable_flush()->set_request_id(flush_request_id);
|
|
async_producer_commands.Resolve(std::move(cmd));
|
|
}
|
|
|
|
void ProducerIPCService::RemoteProducer::ClearIncrementalState(
|
|
const DataSourceInstanceID* data_source_ids,
|
|
size_t num_data_sources) {
|
|
if (!async_producer_commands.IsBound()) {
|
|
PERFETTO_DLOG(
|
|
"The Service tried to request an incremental state invalidation, but "
|
|
"the remote Producer has not yet initialized the connection");
|
|
return;
|
|
}
|
|
auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
|
|
cmd.set_has_more(true);
|
|
for (size_t i = 0; i < num_data_sources; i++)
|
|
cmd->mutable_clear_incremental_state()->add_data_source_ids(
|
|
data_source_ids[i]);
|
|
async_producer_commands.Resolve(std::move(cmd));
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/service/service_ipc_host_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/service/service_ipc_host_impl.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/ipc/service_ipc_host.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_IPC_SERVICE_IPC_HOST_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_IPC_SERVICE_IPC_HOST_H_
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base.
|
|
|
|
class TracingService;
|
|
|
|
// Creates an instance of the service (business logic + UNIX socket transport).
|
|
// Exposed to:
|
|
// The code in the tracing client that will host the service e.g., traced.
|
|
// Implemented in:
|
|
// src/tracing/ipc/service/service_ipc_host_impl.cc
|
|
class PERFETTO_EXPORT ServiceIPCHost {
|
|
public:
|
|
static std::unique_ptr<ServiceIPCHost> CreateInstance(base::TaskRunner*);
|
|
virtual ~ServiceIPCHost();
|
|
|
|
// Start listening on the Producer & Consumer ports. Returns false in case of
|
|
// failure (e.g., something else is listening on |socket_name|).
|
|
virtual bool Start(const char* producer_socket_name,
|
|
const char* consumer_socket_name) = 0;
|
|
|
|
// Like the above, but takes two file descriptors to already bound sockets.
|
|
// This is used when building as part of the Android tree, where init opens
|
|
  // and binds the socket before exec()-ing us.
|
|
virtual bool Start(base::ScopedFile producer_socket_fd,
|
|
base::ScopedFile consumer_socket_fd) = 0;
|
|
|
|
virtual TracingService* service() const = 0;
|
|
|
|
protected:
|
|
ServiceIPCHost();
|
|
|
|
private:
|
|
ServiceIPCHost(const ServiceIPCHost&) = delete;
|
|
ServiceIPCHost& operator=(const ServiceIPCHost&) = delete;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_IPC_SERVICE_IPC_HOST_H_
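
// Typical usage by the process hosting the service (e.g., traced), given a
// perfetto::base::TaskRunner* |task_runner|. This is a sketch; the socket
// paths are illustrative:
//
//   std::unique_ptr<perfetto::ServiceIPCHost> host =
//       perfetto::ServiceIPCHost::CreateInstance(task_runner);
//   if (!host->Start("/tmp/perfetto-producer", "/tmp/perfetto-consumer"))
//     PERFETTO_FATAL("Failed to start the tracing service");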
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_SERVICE_SERVICE_IPC_HOST_IMPL_H_
|
|
#define SRC_TRACING_IPC_SERVICE_SERVICE_IPC_HOST_IMPL_H_
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/service_ipc_host.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace ipc {
|
|
class Host;
|
|
}
|
|
|
|
// The implementation of the IPC host for the tracing service. This class does
|
|
// very few things: it mostly initializes the IPC transport. The actual
|
|
// implementation of the IPC <> Service business logic glue lives in
|
|
// producer_ipc_service.cc and consumer_ipc_service.cc.
|
|
class ServiceIPCHostImpl : public ServiceIPCHost {
|
|
public:
|
|
ServiceIPCHostImpl(base::TaskRunner*);
|
|
~ServiceIPCHostImpl() override;
|
|
|
|
// ServiceIPCHost implementation.
|
|
bool Start(const char* producer_socket_name,
|
|
const char* consumer_socket_name) override;
|
|
bool Start(base::ScopedFile producer_socket_fd,
|
|
base::ScopedFile consumer_socket_fd) override;
|
|
|
|
TracingService* service() const override;
|
|
|
|
private:
|
|
bool DoStart();
|
|
void Shutdown();
|
|
|
|
base::TaskRunner* const task_runner_;
|
|
std::unique_ptr<TracingService> svc_; // The service business logic.
|
|
|
|
// The IPC host that listens on the Producer socket. It owns the
|
|
// PosixServiceProducerPort instance which deals with all producers' IPC(s).
|
|
std::unique_ptr<ipc::Host> producer_ipc_port_;
|
|
|
|
// As above, but for the Consumer port.
|
|
std::unique_ptr<ipc::Host> consumer_ipc_port_;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_SERVICE_SERVICE_IPC_HOST_IMPL_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/service_ipc_host_impl.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/consumer_ipc_service.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/producer_ipc_service.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// TODO(fmayer): implement per-uid connection limit (b/69093705).
|
|
|
|
// Implements the publicly exposed factory method declared in
// include/perfetto/ext/tracing/ipc/service_ipc_host.h.
|
|
std::unique_ptr<ServiceIPCHost> ServiceIPCHost::CreateInstance(
|
|
base::TaskRunner* task_runner) {
|
|
return std::unique_ptr<ServiceIPCHost>(new ServiceIPCHostImpl(task_runner));
|
|
}
|
|
|
|
ServiceIPCHostImpl::ServiceIPCHostImpl(base::TaskRunner* task_runner)
|
|
: task_runner_(task_runner) {}
|
|
|
|
ServiceIPCHostImpl::~ServiceIPCHostImpl() {}
|
|
|
|
bool ServiceIPCHostImpl::Start(const char* producer_socket_name,
|
|
const char* consumer_socket_name) {
|
|
PERFETTO_CHECK(!svc_); // Check if already started.
|
|
|
|
// Initialize the IPC transport.
|
|
producer_ipc_port_ =
|
|
ipc::Host::CreateInstance(producer_socket_name, task_runner_);
|
|
consumer_ipc_port_ =
|
|
ipc::Host::CreateInstance(consumer_socket_name, task_runner_);
|
|
return DoStart();
|
|
}
|
|
|
|
bool ServiceIPCHostImpl::Start(base::ScopedFile producer_socket_fd,
|
|
base::ScopedFile consumer_socket_fd) {
|
|
PERFETTO_CHECK(!svc_); // Check if already started.
|
|
|
|
// Initialize the IPC transport.
|
|
producer_ipc_port_ =
|
|
ipc::Host::CreateInstance(std::move(producer_socket_fd), task_runner_);
|
|
consumer_ipc_port_ =
|
|
ipc::Host::CreateInstance(std::move(consumer_socket_fd), task_runner_);
|
|
return DoStart();
|
|
}
|
|
|
|
bool ServiceIPCHostImpl::DoStart() {
|
|
// Create and initialize the platform-independent tracing business logic.
|
|
std::unique_ptr<SharedMemory::Factory> shm_factory(
|
|
new PosixSharedMemory::Factory());
|
|
svc_ = TracingService::CreateInstance(std::move(shm_factory), task_runner_);
|
|
|
|
if (!producer_ipc_port_ || !consumer_ipc_port_) {
|
|
Shutdown();
|
|
return false;
|
|
}
|
|
|
|
  // TODO(fmayer): add a test that destroys the ServiceIPCHostImpl soon after
|
|
// Start() and checks that no spurious callbacks are issued.
|
|
bool producer_service_exposed = producer_ipc_port_->ExposeService(
|
|
std::unique_ptr<ipc::Service>(new ProducerIPCService(svc_.get())));
|
|
PERFETTO_CHECK(producer_service_exposed);
|
|
|
|
bool consumer_service_exposed = consumer_ipc_port_->ExposeService(
|
|
std::unique_ptr<ipc::Service>(new ConsumerIPCService(svc_.get())));
|
|
PERFETTO_CHECK(consumer_service_exposed);
|
|
|
|
return true;
|
|
}
|
|
|
|
TracingService* ServiceIPCHostImpl::service() const {
|
|
return svc_.get();
|
|
}
|
|
|
|
void ServiceIPCHostImpl::Shutdown() {
|
|
// TODO(primiano): add a test that causes the Shutdown() and checks that no
|
|
// spurious callbacks are issued.
|
|
producer_ipc_port_.reset();
|
|
consumer_ipc_port_.reset();
|
|
svc_.reset();
|
|
}
|
|
|
|
// Definitions for the base class ctor/dtor.
|
|
ServiceIPCHost::ServiceIPCHost() = default;
|
|
ServiceIPCHost::~ServiceIPCHost() = default;
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/system_tracing_backend.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/system_tracing_backend.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/default_socket.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/producer_ipc_client.h"
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
// static
|
|
TracingBackend* SystemTracingBackend::GetInstance() {
|
|
static auto* instance = new SystemTracingBackend();
|
|
return instance;
|
|
}
|
|
|
|
SystemTracingBackend::SystemTracingBackend() {}
|
|
|
|
std::unique_ptr<ProducerEndpoint> SystemTracingBackend::ConnectProducer(
|
|
const ConnectProducerArgs& args) {
|
|
PERFETTO_DCHECK(args.task_runner->RunsTasksOnCurrentThread());
|
|
|
|
auto endpoint = ProducerIPCClient::Connect(
|
|
GetProducerSocket(), args.producer, args.producer_name, args.task_runner,
|
|
TracingService::ProducerSMBScrapingMode::kEnabled,
|
|
args.shmem_size_hint_bytes, args.shmem_page_size_hint_bytes, nullptr,
|
|
nullptr, ProducerIPCClient::ConnectionFlags::kRetryIfUnreachable);
|
|
PERFETTO_CHECK(endpoint);
|
|
return endpoint;
|
|
}
|
|
|
|
std::unique_ptr<ConsumerEndpoint> SystemTracingBackend::ConnectConsumer(
|
|
const ConnectConsumerArgs&) {
|
|
PERFETTO_FATAL(
|
|
"Trace session creation is not supported yet when using the system "
|
|
"tracing backend. Use the perfetto cmdline client instead to start "
|
|
"system-wide tracing sessions");
|
|
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
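
// Application code normally reaches this backend through the public SDK entry
// points in perfetto.h rather than using SystemTracingBackend directly. A
// minimal sketch of selecting the system backend:
//
//   perfetto::TracingInitArgs args;
//   args.backends = perfetto::kSystemBackend;  // Talk to the traced service.
//   perfetto::Tracing::Initialize(args);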
|
|
// gen_amalgamated begin source: src/tracing/platform_posix.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_tls.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/platform.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
|
|
|
|
#include <pthread.h>
|
|
#include <stdlib.h>
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
|
|
class PlatformPosix : public Platform {
|
|
public:
|
|
PlatformPosix();
|
|
~PlatformPosix() override;
|
|
|
|
ThreadLocalObject* GetOrCreateThreadLocalObject() override;
|
|
std::unique_ptr<base::TaskRunner> CreateTaskRunner(
|
|
const CreateTaskRunnerArgs&) override;
|
|
std::string GetCurrentProcessName() override;
|
|
|
|
private:
|
|
pthread_key_t tls_key_{};
|
|
};
|
|
|
|
// TODO(primiano): make base::ThreadTaskRunner directly inherit TaskRunner, so
|
|
// we can avoid this boilerplate.
|
|
class TaskRunnerInstance : public base::TaskRunner {
|
|
public:
|
|
TaskRunnerInstance();
|
|
~TaskRunnerInstance() override;
|
|
|
|
void PostTask(std::function<void()>) override;
|
|
void PostDelayedTask(std::function<void()>, uint32_t delay_ms) override;
|
|
void AddFileDescriptorWatch(int fd, std::function<void()>) override;
|
|
void RemoveFileDescriptorWatch(int fd) override;
|
|
bool RunsTasksOnCurrentThread() const override;
|
|
|
|
private:
|
|
base::ThreadTaskRunner thread_task_runner_;
|
|
};
|
|
|
|
using ThreadLocalObject = Platform::ThreadLocalObject;
|
|
|
|
PlatformPosix::PlatformPosix() {
|
|
auto tls_dtor = [](void* obj) {
|
|
delete static_cast<ThreadLocalObject*>(obj);
|
|
};
|
|
PERFETTO_CHECK(pthread_key_create(&tls_key_, tls_dtor) == 0);
|
|
}
|
|
|
|
PlatformPosix::~PlatformPosix() {
|
|
pthread_key_delete(tls_key_);
|
|
}
|
|
|
|
ThreadLocalObject* PlatformPosix::GetOrCreateThreadLocalObject() {
|
|
// In chromium this should be implemented using base::ThreadLocalStorage.
|
|
auto tls = static_cast<ThreadLocalObject*>(pthread_getspecific(tls_key_));
|
|
if (!tls) {
|
|
tls = ThreadLocalObject::CreateInstance().release();
|
|
pthread_setspecific(tls_key_, tls);
|
|
}
|
|
return tls;
|
|
}
|
|
|
|
std::unique_ptr<base::TaskRunner> PlatformPosix::CreateTaskRunner(
|
|
const CreateTaskRunnerArgs&) {
|
|
return std::unique_ptr<base::TaskRunner>(new TaskRunnerInstance());
|
|
}
|
|
|
|
std::string PlatformPosix::GetCurrentProcessName() {
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
std::string cmdline;
|
|
base::ReadFile("/proc/self/cmdline", &cmdline);
|
|
return cmdline.substr(0, cmdline.find('\0'));
|
|
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
return std::string(getprogname());
|
|
#else
|
|
return "unknown_producer";
|
|
#endif
|
|
}
|
|
|
|
TaskRunnerInstance::TaskRunnerInstance()
|
|
: thread_task_runner_(base::ThreadTaskRunner::CreateAndStart()) {}
|
|
TaskRunnerInstance::~TaskRunnerInstance() = default;
|
|
void TaskRunnerInstance::PostTask(std::function<void()> func) {
|
|
thread_task_runner_.get()->PostTask(func);
|
|
}
|
|
|
|
void TaskRunnerInstance::PostDelayedTask(std::function<void()> func,
|
|
uint32_t delay_ms) {
|
|
thread_task_runner_.get()->PostDelayedTask(func, delay_ms);
|
|
}
|
|
|
|
void TaskRunnerInstance::AddFileDescriptorWatch(int fd,
|
|
std::function<void()> func) {
|
|
thread_task_runner_.get()->AddFileDescriptorWatch(fd, func);
|
|
}
|
|
|
|
void TaskRunnerInstance::RemoveFileDescriptorWatch(int fd) {
|
|
thread_task_runner_.get()->RemoveFileDescriptorWatch(fd);
|
|
}
|
|
|
|
bool TaskRunnerInstance::RunsTasksOnCurrentThread() const {
|
|
return thread_task_runner_.get()->RunsTasksOnCurrentThread();
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// static
|
|
Platform* Platform::GetDefaultPlatform() {
|
|
static PlatformPosix* instance = new PlatformPosix();
|
|
return instance;
|
|
}
|
|
|
|
} // namespace perfetto
|
|
|