Commit 5b4ef781 authored by Anuj Phogat, committed by Marge Bot
Browse files

intel: Fix alignment and line wrapping due to gen_perf renaming


Signed-off-by: Anuj Phogat <anuj.phogat@gmail.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <mesa/mesa!10241>
parent bbe81292
......@@ -84,7 +84,8 @@ iris_new_perf_query_obj(struct pipe_context *pipe, unsigned query_index)
{
struct iris_context *ice = (void *) pipe;
struct intel_perf_context *perf_ctx = ice->perf_ctx;
struct intel_perf_query_object * obj = intel_perf_new_query(perf_ctx, query_index);
struct intel_perf_query_object * obj =
intel_perf_new_query(perf_ctx, query_index);
if (unlikely(!obj))
return NULL;
......@@ -167,7 +168,8 @@ iris_get_perf_counter_info(struct pipe_context *pipe,
struct intel_perf_context *perf_ctx = ice->perf_ctx;
struct intel_perf_config *perf_cfg = intel_perf_config(perf_ctx);
const struct intel_perf_query_info *info = &perf_cfg->queries[query_index];
const struct intel_perf_query_counter *counter = &info->counters[counter_index];
const struct intel_perf_query_counter *counter =
&info->counters[counter_index];
*name = counter->name;
*desc = counter->desc;
......@@ -200,7 +202,8 @@ iris_is_perf_query_ready(struct pipe_context *pipe, struct pipe_query *q)
if (perf_query->base.Ready)
return true;
return intel_perf_is_query_ready(perf_ctx, obj, &ice->batches[IRIS_BATCH_RENDER]);
return intel_perf_is_query_ready(perf_ctx, obj,
&ice->batches[IRIS_BATCH_RENDER]);
}
static void
......
......@@ -306,8 +306,8 @@ i915_query_perf_config_data(struct intel_perf_config *perf,
bool
intel_perf_load_metric_id(struct intel_perf_config *perf_cfg,
const char *guid,
uint64_t *metric_id)
const char *guid,
uint64_t *metric_id)
{
char config_path[280];
......@@ -512,76 +512,76 @@ load_pipeline_statistic_metrics(struct intel_perf_config *perf_cfg,
query->name = "Pipeline Statistics Registers";
intel_perf_query_add_basic_stat_reg(query, IA_VERTICES_COUNT,
"N vertices submitted");
"N vertices submitted");
intel_perf_query_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
"N primitives submitted");
"N primitives submitted");
intel_perf_query_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
"N vertex shader invocations");
"N vertex shader invocations");
if (devinfo->ver == 6) {
intel_perf_query_add_stat_reg(query, GFX6_SO_PRIM_STORAGE_NEEDED, 1, 1,
"SO_PRIM_STORAGE_NEEDED",
"N geometry shader stream-out primitives (total)");
"SO_PRIM_STORAGE_NEEDED",
"N geometry shader stream-out primitives (total)");
intel_perf_query_add_stat_reg(query, GFX6_SO_NUM_PRIMS_WRITTEN, 1, 1,
"SO_NUM_PRIMS_WRITTEN",
"N geometry shader stream-out primitives (written)");
"SO_NUM_PRIMS_WRITTEN",
"N geometry shader stream-out primitives (written)");
} else {
intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
"SO_PRIM_STORAGE_NEEDED (Stream 0)",
"N stream-out (stream 0) primitives (total)");
"SO_PRIM_STORAGE_NEEDED (Stream 0)",
"N stream-out (stream 0) primitives (total)");
intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
"SO_PRIM_STORAGE_NEEDED (Stream 1)",
"N stream-out (stream 1) primitives (total)");
"SO_PRIM_STORAGE_NEEDED (Stream 1)",
"N stream-out (stream 1) primitives (total)");
intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
"SO_PRIM_STORAGE_NEEDED (Stream 2)",
"N stream-out (stream 2) primitives (total)");
"SO_PRIM_STORAGE_NEEDED (Stream 2)",
"N stream-out (stream 2) primitives (total)");
intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
"SO_PRIM_STORAGE_NEEDED (Stream 3)",
"N stream-out (stream 3) primitives (total)");
"SO_PRIM_STORAGE_NEEDED (Stream 3)",
"N stream-out (stream 3) primitives (total)");
intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
"SO_NUM_PRIMS_WRITTEN (Stream 0)",
"N stream-out (stream 0) primitives (written)");
"SO_NUM_PRIMS_WRITTEN (Stream 0)",
"N stream-out (stream 0) primitives (written)");
intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
"SO_NUM_PRIMS_WRITTEN (Stream 1)",
"N stream-out (stream 1) primitives (written)");
"SO_NUM_PRIMS_WRITTEN (Stream 1)",
"N stream-out (stream 1) primitives (written)");
intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
"SO_NUM_PRIMS_WRITTEN (Stream 2)",
"N stream-out (stream 2) primitives (written)");
"SO_NUM_PRIMS_WRITTEN (Stream 2)",
"N stream-out (stream 2) primitives (written)");
intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
"SO_NUM_PRIMS_WRITTEN (Stream 3)",
"N stream-out (stream 3) primitives (written)");
"SO_NUM_PRIMS_WRITTEN (Stream 3)",
"N stream-out (stream 3) primitives (written)");
}
intel_perf_query_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
"N TCS shader invocations");
"N TCS shader invocations");
intel_perf_query_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
"N TES shader invocations");
"N TES shader invocations");
intel_perf_query_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
"N geometry shader invocations");
"N geometry shader invocations");
intel_perf_query_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
"N geometry shader primitives emitted");
"N geometry shader primitives emitted");
intel_perf_query_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
"N primitives entering clipping");
"N primitives entering clipping");
intel_perf_query_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
"N primitives leaving clipping");
"N primitives leaving clipping");
if (devinfo->is_haswell || devinfo->ver == 8) {
intel_perf_query_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
"N fragment shader invocations",
"N fragment shader invocations");
"N fragment shader invocations",
"N fragment shader invocations");
} else {
intel_perf_query_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
"N fragment shader invocations");
"N fragment shader invocations");
}
intel_perf_query_add_basic_stat_reg(query, PS_DEPTH_COUNT,
"N z-pass fragments");
"N z-pass fragments");
if (devinfo->ver >= 7) {
intel_perf_query_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
"N compute shader invocations");
"N compute shader invocations");
}
query->data_size = sizeof(uint64_t) * query->n_counters;
......@@ -829,8 +829,8 @@ intel_perf_load_configuration(struct intel_perf_config *perf_cfg, int fd, const
uint64_t
intel_perf_store_configuration(struct intel_perf_config *perf_cfg, int fd,
const struct intel_perf_registers *config,
const char *guid)
const struct intel_perf_registers *config,
const char *guid)
{
if (guid)
return i915_add_config(perf_cfg, fd, config, guid);
......@@ -910,9 +910,9 @@ get_passes_mask(struct intel_perf_config *perf,
uint32_t
intel_perf_get_n_passes(struct intel_perf_config *perf,
const uint32_t *counter_indices,
uint32_t counter_indices_count,
struct intel_perf_query_info **pass_queries)
const uint32_t *counter_indices,
uint32_t counter_indices_count,
struct intel_perf_query_info **pass_queries)
{
uint64_t queries_mask = get_passes_mask(perf, counter_indices, counter_indices_count);
......@@ -929,9 +929,9 @@ intel_perf_get_n_passes(struct intel_perf_config *perf,
void
intel_perf_get_counters_passes(struct intel_perf_config *perf,
const uint32_t *counter_indices,
uint32_t counter_indices_count,
struct intel_perf_counter_pass *counter_pass)
const uint32_t *counter_indices,
uint32_t counter_indices_count,
struct intel_perf_counter_pass *counter_pass)
{
uint64_t queries_mask = get_passes_mask(perf, counter_indices, counter_indices_count);
ASSERTED uint32_t n_passes = __builtin_popcount(queries_mask);
......@@ -1014,9 +1014,9 @@ gfx8_read_report_clock_ratios(const uint32_t *report,
void
intel_perf_query_result_read_frequencies(struct intel_perf_query_result *result,
const struct intel_device_info *devinfo,
const uint32_t *start,
const uint32_t *end)
const struct intel_device_info *devinfo,
const uint32_t *start,
const uint32_t *end)
{
/* Slice/Unslice frequency is only available in the OA reports when the
* "Disable OA reports due to clock ratio change" field in
......@@ -1045,10 +1045,10 @@ can_use_mi_rpc_bc_counters(const struct intel_device_info *devinfo)
void
intel_perf_query_result_accumulate(struct intel_perf_query_result *result,
const struct intel_perf_query_info *query,
const struct intel_device_info *devinfo,
const uint32_t *start,
const uint32_t *end)
const struct intel_perf_query_info *query,
const struct intel_device_info *devinfo,
const uint32_t *start,
const uint32_t *end)
{
int i;
......@@ -1112,9 +1112,9 @@ intel_perf_query_result_accumulate(struct intel_perf_query_result *result,
void
intel_perf_query_result_read_gt_frequency(struct intel_perf_query_result *result,
const struct intel_device_info *devinfo,
const uint32_t start,
const uint32_t end)
const struct intel_device_info *devinfo,
const uint32_t start,
const uint32_t end)
{
switch (devinfo->ver) {
case 7:
......@@ -1139,9 +1139,9 @@ intel_perf_query_result_read_gt_frequency(struct intel_perf_query_result *result
void
intel_perf_query_result_read_perfcnts(struct intel_perf_query_result *result,
const struct intel_perf_query_info *query,
const uint64_t *start,
const uint64_t *end)
const struct intel_perf_query_info *query,
const uint64_t *start,
const uint64_t *end)
{
for (uint32_t i = 0; i < 2; i++) {
uint64_t v0 = start[i] & PERF_CNT_VALUE_MASK;
......@@ -1173,11 +1173,11 @@ query_accumulator_offset(const struct intel_perf_query_info *query,
void
intel_perf_query_result_accumulate_fields(struct intel_perf_query_result *result,
const struct intel_perf_query_info *query,
const struct intel_device_info *devinfo,
const void *start,
const void *end,
bool no_oa_accumulate)
const struct intel_perf_query_info *query,
const struct intel_device_info *devinfo,
const void *start,
const void *end,
bool no_oa_accumulate)
{
struct intel_perf_query_field_layout *layout = &query->perf->query_layout;
......@@ -1194,8 +1194,8 @@ intel_perf_query_result_accumulate_fields(struct intel_perf_query_result *result
*/
if (!no_oa_accumulate) {
intel_perf_query_result_accumulate(result, query, devinfo,
start + field->location,
end + field->location);
start + field->location,
end + field->location);
}
} else {
uint64_t v0, v1;
......@@ -1234,8 +1234,8 @@ intel_perf_query_result_clear(struct intel_perf_query_result *result)
void
intel_perf_query_result_print_fields(const struct intel_perf_query_info *query,
const struct intel_device_info *devinfo,
const void *data)
const struct intel_device_info *devinfo,
const void *data)
{
const struct intel_perf_query_field_layout *layout = &query->perf->query_layout;
......@@ -1299,7 +1299,7 @@ add_query_register(struct intel_perf_query_field_layout *layout,
static void
intel_perf_init_query_fields(struct intel_perf_config *perf_cfg,
const struct intel_device_info *devinfo)
const struct intel_device_info *devinfo)
{
struct intel_perf_query_field_layout *layout = &perf_cfg->query_layout;
......@@ -1368,9 +1368,9 @@ intel_perf_init_query_fields(struct intel_perf_config *perf_cfg,
void
intel_perf_init_metrics(struct intel_perf_config *perf_cfg,
const struct intel_device_info *devinfo,
int drm_fd,
bool include_pipeline_statistics)
const struct intel_device_info *devinfo,
int drm_fd,
bool include_pipeline_statistics)
{
intel_perf_init_query_fields(perf_cfg, devinfo);
......
......@@ -383,20 +383,20 @@ struct intel_perf_counter_pass {
};
void intel_perf_init_metrics(struct intel_perf_config *perf_cfg,
const struct intel_device_info *devinfo,
int drm_fd,
bool include_pipeline_statistics);
const struct intel_device_info *devinfo,
int drm_fd,
bool include_pipeline_statistics);
/** Query i915 for a metric id using guid.
*/
bool intel_perf_load_metric_id(struct intel_perf_config *perf_cfg,
const char *guid,
uint64_t *metric_id);
const char *guid,
uint64_t *metric_id);
/** Load a configuation's content from i915 using a guid.
*/
struct intel_perf_registers *intel_perf_load_configuration(struct intel_perf_config *perf_cfg,
int fd, const char *guid);
int fd, const char *guid);
/** Store a configuration into i915 using guid and return a new metric id.
*
......@@ -404,56 +404,56 @@ struct intel_perf_registers *intel_perf_load_configuration(struct intel_perf_con
* content of the configuration.
*/
uint64_t intel_perf_store_configuration(struct intel_perf_config *perf_cfg, int fd,
const struct intel_perf_registers *config,
const char *guid);
const struct intel_perf_registers *config,
const char *guid);
/** Read the slice/unslice frequency from 2 OA reports and store then into
* result.
*/
void intel_perf_query_result_read_frequencies(struct intel_perf_query_result *result,
const struct intel_device_info *devinfo,
const uint32_t *start,
const uint32_t *end);
const struct intel_device_info *devinfo,
const uint32_t *start,
const uint32_t *end);
/** Store the GT frequency as reported by the RPSTAT register.
*/
void intel_perf_query_result_read_gt_frequency(struct intel_perf_query_result *result,
const struct intel_device_info *devinfo,
const uint32_t start,
const uint32_t end);
const struct intel_device_info *devinfo,
const uint32_t start,
const uint32_t end);
/** Store PERFCNT registers values.
*/
void intel_perf_query_result_read_perfcnts(struct intel_perf_query_result *result,
const struct intel_perf_query_info *query,
const uint64_t *start,
const uint64_t *end);
const struct intel_perf_query_info *query,
const uint64_t *start,
const uint64_t *end);
/** Accumulate the delta between 2 OA reports into result for a given query.
*/
void intel_perf_query_result_accumulate(struct intel_perf_query_result *result,
const struct intel_perf_query_info *query,
const struct intel_device_info *devinfo,
const uint32_t *start,
const uint32_t *end);
const struct intel_perf_query_info *query,
const struct intel_device_info *devinfo,
const uint32_t *start,
const uint32_t *end);
/** Accumulate the delta between 2 snapshots of OA perf registers (layout
* should match description specified through intel_perf_query_register_layout).
*/
void intel_perf_query_result_accumulate_fields(struct intel_perf_query_result *result,
const struct intel_perf_query_info *query,
const struct intel_device_info *devinfo,
const void *start,
const void *end,
bool no_oa_accumulate);
const struct intel_perf_query_info *query,
const struct intel_device_info *devinfo,
const void *start,
const void *end,
bool no_oa_accumulate);
void intel_perf_query_result_clear(struct intel_perf_query_result *result);
/** Debug helper printing out query data.
*/
void intel_perf_query_result_print_fields(const struct intel_perf_query_info *query,
const struct intel_device_info *devinfo,
const void *data);
const struct intel_device_info *devinfo,
const void *data);
static inline size_t
intel_perf_query_counter_get_size(const struct intel_perf_query_counter *counter)
......@@ -502,12 +502,12 @@ intel_perf_has_global_sseu(const struct intel_perf_config *perf)
}
uint32_t intel_perf_get_n_passes(struct intel_perf_config *perf,
const uint32_t *counter_indices,
uint32_t counter_indices_count,
struct intel_perf_query_info **pass_queries);
const uint32_t *counter_indices,
uint32_t counter_indices_count,
struct intel_perf_query_info **pass_queries);
void intel_perf_get_counters_passes(struct intel_perf_config *perf,
const uint32_t *counter_indices,
uint32_t counter_indices_count,
struct intel_perf_counter_pass *counter_pass);
const uint32_t *counter_indices,
uint32_t counter_indices_count,
struct intel_perf_counter_pass *counter_pass);
#endif /* INTEL_PERF_H */
......@@ -33,9 +33,9 @@
int
intel_perf_query_result_write_mdapi(void *data, uint32_t data_size,
const struct intel_device_info *devinfo,
const struct intel_perf_query_info *query,
const struct intel_perf_query_result *result)
const struct intel_device_info *devinfo,
const struct intel_perf_query_info *query,
const struct intel_perf_query_result *result)
{
switch (devinfo->ver) {
case 7: {
......@@ -138,7 +138,7 @@ intel_perf_query_result_write_mdapi(void *data, uint32_t data_size,
void
intel_perf_register_mdapi_statistic_query(struct intel_perf_config *perf_cfg,
const struct intel_device_info *devinfo)
const struct intel_device_info *devinfo)
{
if (!(devinfo->ver >= 7 && devinfo->ver <= 12))
return;
......@@ -230,7 +230,7 @@ fill_mdapi_perf_query_counter(struct intel_perf_query_info *query,
void
intel_perf_register_mdapi_oa_query(struct intel_perf_config *perf,
const struct intel_device_info *devinfo)
const struct intel_device_info *devinfo)
{
struct intel_perf_query_info *query = NULL;
......
......@@ -128,13 +128,13 @@ struct mdapi_pipeline_metrics {
};
int intel_perf_query_result_write_mdapi(void *data, uint32_t data_size,
const struct intel_device_info *devinfo,
const struct intel_perf_query_info *query,
const struct intel_perf_query_result *result);
const struct intel_device_info *devinfo,
const struct intel_perf_query_info *query,
const struct intel_perf_query_result *result);
static inline void intel_perf_query_mdapi_write_marker(void *data, uint32_t data_size,
const struct intel_device_info *devinfo,
uint64_t value)
const struct intel_device_info *devinfo,
uint64_t value)
{
switch (devinfo->ver) {
case 8: {
......
......@@ -38,8 +38,8 @@ static inline uint64_t to_const_user_pointer(const void *ptr)
static inline void
intel_perf_query_add_stat_reg(struct intel_perf_query_info *query, uint32_t reg,
uint32_t numerator, uint32_t denominator,
const char *name, const char *description)
uint32_t numerator, uint32_t denominator,
const char *name, const char *description)
{
struct intel_perf_query_counter *counter;
......@@ -60,7 +60,7 @@ intel_perf_query_add_stat_reg(struct intel_perf_query_info *query, uint32_t reg,
static inline void
intel_perf_query_add_basic_stat_reg(struct intel_perf_query_info *query,
uint32_t reg, const char *name)
uint32_t reg, const char *name)
{
intel_perf_query_add_stat_reg(query, reg, 1, 1, name, name);
}
......@@ -88,9 +88,9 @@ intel_perf_append_query_info(struct intel_perf_config *perf, int max_counters)
}
void intel_perf_register_mdapi_statistic_query(struct intel_perf_config *perf_cfg,
const struct intel_device_info *devinfo);
const struct intel_device_info *devinfo);
void intel_perf_register_mdapi_oa_query(struct intel_perf_config *perf,
const struct intel_device_info *devinfo);
const struct intel_device_info *devinfo);
#endif /* INTEL_PERF_PRIVATE_H */
......@@ -347,7 +347,7 @@ dec_n_users(struct intel_perf_context *perf_ctx)
static void
intel_perf_close(struct intel_perf_context *perfquery,
const struct intel_perf_query_info *query)
const struct intel_perf_query_info *query)
{
if (perfquery->oa_stream_fd != -1) {
close(perfquery->oa_stream_fd);
......@@ -362,11 +362,11 @@ intel_perf_close(struct intel_perf_context *perfquery,
static bool
intel_perf_open(struct intel_perf_context *perf_ctx,
int metrics_set_id,
int report_format,
int period_exponent,
int drm_fd,
uint32_t ctx_id)
int metrics_set_id,
int report_format,
int period_exponent,
int drm_fd,
uint32_t ctx_id)
{
uint64_t properties[DRM_I915_PERF_PROP_MAX * 2];
uint32_t p = 0;
......@@ -526,7 +526,7 @@ intel_perf_new_query(struct intel_perf_context *perf_ctx, unsigned query_index)
int
intel_perf_active_queries(struct intel_perf_context *perf_ctx,
const struct intel_perf_query_info *query)
const struct intel_perf_query_info *query)
{
assert(perf_ctx->n_active_oa_queries == 0 || perf_ctx->n_active_pipeline_stats_queries == 0);
......@@ -569,13 +569,13 @@ intel_perf_config(struct intel_perf_context *ctx)
void
intel_perf_init_context(struct intel_perf_context *perf_ctx,
struct intel_perf_config *perf_cfg,
void * mem_ctx, /* ralloc context */
void * ctx, /* driver context (eg, brw_context) */
void * bufmgr, /* eg brw_bufmgr */
const struct intel_device_info *devinfo,
uint32_t hw_ctx,
int drm_fd)
struct intel_perf_config *perf_cfg,
void * mem_ctx, /* ralloc context */
void * ctx, /* driver context (eg, brw_context) */
void * bufmgr, /* eg brw_bufmgr */
const struct intel_device_info *devinfo,
uint32_t hw_ctx,
int drm_fd)
{
perf_ctx->perf = perf_cfg;
perf_ctx->mem_ctx = mem_ctx;
......@@ -689,7 +689,7 @@ snapshot_query_layout(struct intel_perf_context *perf_ctx,
bool
intel_perf_begin_query(struct intel_perf_context *perf_ctx,
struct intel_perf_query_object *query)
struct intel_perf_query_object *query)
{
struct intel_perf_config *perf_cfg = perf_ctx->perf;
const struct intel_perf_query_info *queryinfo = query->queryinfo;
......@@ -900,7 +900,7 @@ intel_perf_begin_query(struct intel_perf_context *perf_ctx,
void
intel_perf_end_query(struct intel_perf_context *perf_ctx,
struct intel_perf_query_object *query)
struct intel_perf_query_object *query)
{
struct intel_perf_config *perf_cfg = perf_ctx->perf;
......@@ -1069,8 +1069,8 @@ read_oa_samples_for_query(struct intel_perf_context *perf_ctx,
void
intel_perf_wait_query(struct intel_perf_context *perf_ctx,
struct intel_perf_query_object *query,
void *current_batch)
struct intel_perf_query_object *query,
void *current_batch)
{
struct intel_perf_config *perf_cfg = perf_ctx->perf;
struct brw_bo *bo = NULL;
......@@ -1104,8 +1104,8 @@ intel_perf_wait_query(struct intel_perf_context *perf_ctx,
bool
intel_perf_is_query_ready(struct intel_perf_context *perf_ctx,
struct intel_perf_query_object *query,
void *current_batch)
struct intel_perf_query_object *query,
void *current_batch)
{
struct intel_perf_config *perf_cfg = perf_ctx->perf;
......@@ -1383,7 +1383,7 @@ error:
void
intel_perf_delete_query(struct intel_perf_context *perf_ctx,
struct intel_perf_query_object *query)
struct intel_perf_query_object *query)
{
struct intel_perf_config *perf_cfg = perf_ctx->perf;
......@@ -1511,11 +1511,11 @@ get_pipeline_stats_data(struct intel_perf_context *perf_ctx,
void
intel_perf_get_query_data(struct intel_perf_context *perf_ctx,
struct intel_perf_query_object *query,
void *current_batch,