// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <byteswap.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <perf/evsel.h>
#include "asm/bug.h"
#include "callchain.h"
#include "cgroup.h"
#include "counts.h"
#include "event.h"
#include "evsel.h"
#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
#include "evlist.h"
#include <perf/cpumap.h>
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "record.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
#include "util.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include <internal/xyarray.h>
#include <internal/lib.h>

#include <linux/ctype.h>

struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
	return 0;
}

void __weak test_attr__ready(void) { }

static void perf_evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct evsel *evsel);
	void	(*fini)(struct evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct evsel *evsel),
			      void (*fini)(struct evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}
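
/*
 * Usage sketch (illustrative only, with hypothetical names): a tool that
 * wraps struct evsel in a larger private object would register its size
 * and constructor/destructor once at startup, e.g.:
 *
 *	struct my_evsel { struct evsel evsel; int my_state; };
 *	static int my_init(struct evsel *e) { ... return 0; }
 *	static void my_fini(struct evsel *e) { ... }
 *	...
 *	perf_evsel__object_config(sizeof(struct my_evsel), my_init, my_fini);
 *
 * After that, every allocation via perf_evsel__new_idx() is large enough
 * for the embedding object and runs my_init()/my_fini() around the evsel.
 */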

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
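
/*
 * Worked example: sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME sets three bits within PERF_SAMPLE_MASK, so the
 * fixed portion of each sample is 3 * sizeof(u64) = 24 bytes.
 */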

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * perf_record_sample.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}
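
/*
 * For example (illustrative): with sample_type = PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_ID, the walk above yields id_pos = 2:
 * IP occupies index 0 and TID index 1 of the sample's u64 array, so
 * the event id is the third u64.
 */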

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
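
/*
 * For example (illustrative): with sample_type = PERF_SAMPLE_ID |
 * PERF_SAMPLE_CPU and sample_id_all set, is_pos = 2, i.e. the event id
 * is the second u64 counting back from the end of a non-sample event,
 * because the CPU entry sits between the id and the end.
 */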

void perf_evsel__calc_id_pos(struct evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
}

void __perf_evsel__set_sample_bit(struct evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->core.attr.sample_type & bit)) {
		evsel->core.attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->core.attr.sample_type & bit) {
		evsel->core.attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->core.attr.read_format |= PERF_FORMAT_ID;
}
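
/*
 * Note (illustrative): PERF_SAMPLE_IDENTIFIER is preferred when the
 * kernel supports it because it places the id at a fixed position
 * (first u64 of a sample, last u64 of a non-sample event), so events
 * with differing sample_types can still be demultiplexed from one mmap.
 */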

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool perf_evsel__is_function_event(struct evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

void evsel__init(struct evsel *evsel,
		 struct perf_event_attr *attr, int idx)
{
	perf_evsel__init(&evsel->core, attr);
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	evsel->max_events  = ULONG_MAX;
	evsel->evlist	   = NULL;
	evsel->bpf_obj	   = NULL;
	evsel->bpf_fd	   = -1;
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_expr   = NULL;
	evsel->metric_name   = NULL;
	evsel->metric_events = NULL;
	evsel->collect_stat  = false;
	evsel->pmu_name      = NULL;
}

struct evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);

	if (!evsel)
		return NULL;
	evsel__init(evsel, attr, idx);

	if (perf_evsel__is_bpf_output(evsel)) {
		evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		evsel->core.attr.sample_period = 1;
	}

	if (perf_evsel__is_clock(evsel)) {
		/*
		 * The evsel->unit points to static alias->unit
		 * so it's ok to use static string in here.
		 */
		static const char *unit = "msec";

		evsel->unit = unit;
		evsel->scale = 1e-6;
	}

	return evsel;
}
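
/*
 * For example (illustrative): a cpu-clock or task-clock evsel is counted
 * by the kernel in nanoseconds, so the clock branch above sets
 * scale = 1e-6 and unit = "msec" to display the counts as milliseconds.
 */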

static bool perf_event_can_profile_kernel(void)
{
	return perf_event_paranoid_check(1);
}

struct evsel *perf_evsel__new_cycles(bool precise)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.exclude_kernel	= !perf_event_can_profile_kernel(),
	};
	struct evsel *evsel;

	event_attr_init(&attr);

	if (!precise)
		goto new_event;

	/*
	 * Now let the usual logic that sets up the perf_event_attr defaults
	 * kick in when we return, before perf_evsel__open() is called.
	 */
new_event:
	evsel = evsel__new(&attr);
	if (evsel == NULL)
		goto out;

	evsel->precise_max = true;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%s%s%.*s",
		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
		     attr.exclude_kernel ? "u" : "",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
		goto error_free;
out:
	return evsel;
error_free:
	evsel__delete(evsel);
	evsel = NULL;
	goto out;
}

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	       = PERF_TYPE_TRACEPOINT,
			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->core.attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
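
/*
 * Worked example (illustrative): for a user-only event with
 * exclude_kernel = exclude_hv = 1 and precise_ip = 2, the code above
 * prints 'u' followed by "pp", so perf_evsel__hw_name() would produce
 * "cycles:upp".
 */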

static int perf_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->core.attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->core.attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}
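
/*
 * For example (illustrative): addr = 0x1234 with type = HW_BREAKPOINT_R |
 * HW_BREAKPOINT_W formats as "mem:0x1234:rw".
 */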

static int perf_evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stats
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
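
/*
 * Worked example (illustrative): config = (PERF_COUNT_HW_CACHE_RESULT_MISS
 * << 16) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | PERF_COUNT_HW_CACHE_L1D
 * decodes to type L1D, op READ, result MISS, and is rendered as
 * "L1-dcache-load-misses".
 */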

static int perf_evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->core.attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__tool_name(char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "duration_time");
	return ret;
}

const char *perf_evsel__name(struct evsel *evsel)
{
	char bf[128];

	if (!evsel)
		goto out_unknown;

	if (evsel->name)
		return evsel->name;

	switch (evsel->core.attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		if (evsel->tool_event)
			perf_evsel__tool_name(bf, sizeof(bf));
		else
			perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->core.attr.type);
		break;
	}

	evsel->name = strdup(bf);

	if (evsel->name)
		return evsel->name;
out_unknown:
	return "unknown";
}

const char *perf_evsel__group_name(struct evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

/*
 * Returns the group details for the specified leader,
 * with the following rules.
 *
 *  For record -e '{cycles,instructions}'
 *    'anon group { cycles:u, instructions:u }'
 *
 *  For record -e 'cycles,instructions' and report --group
 *    'cycles:u, instructions:u'
 */
int perf_evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
{
	int ret = 0;
	struct evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	if (!evsel->forced_leader)
		ret = scnprintf(buf, size, "%s { ", group_name);

	ret += scnprintf(buf + ret, size - ret, "%s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	if (!evsel->forced_leader)
		ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

static void __perf_evsel__config_callchain(struct evsel *evsel,
					   struct record_opts *opts,
					   struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->core.attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (opts->kernel_callchains)
		attr->exclude_callchain_user = 1;
	if (opts->user_callchains)
		attr->exclude_callchain_kernel = 1;
	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS |
							PERF_SAMPLE_BRANCH_HW_INDEX;
			}
		} else
			 pr_warning("Cannot use LBR callstack with branch stack. "
				    "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
				attr->sample_regs_user |= DWARF_MINIMAL_REGS;
				pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
					   "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
					   "so the minimal registers set (IP, SP) is explicitly forced.\n");
			} else {
				attr->sample_regs_user |= PERF_REGS_MASK;
			}
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

void perf_evsel__config_callchain(struct evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *param)
{
	if (param->enabled)
		return __perf_evsel__config_callchain(evsel, opts, param);
}

static void
perf_evsel__reset_callgraph(struct evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->core.attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK |
					      PERF_SAMPLE_BRANCH_HW_INDEX);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}

static void apply_config_terms(struct evsel *evsel,
			       struct record_opts *opts, bool track)
{
	struct perf_evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->core.attr;
	/* callgraph default */
	struct callchain_param param = {
		.record_mode = callchain_param.record_mode,
	};
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case PERF_EVSEL__CONFIG_TERM_PERIOD:
			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
				attr->sample_period = term->val.period;
				attr->freq = 0;
				perf_evsel__reset_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_FREQ:
			if (!(term->weak && opts->user_freq != UINT_MAX)) {
				attr->sample_freq = term->val.freq;
				attr->freq = 1;
				perf_evsel__set_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				perf_evsel__set_sample_bit(evsel, TIME);
			else
				perf_evsel__reset_sample_bit(evsel, TIME);
			break;
		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.str;
			break;
		case PERF_EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.str && strcmp(term->val.str, "no")) {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.str,
						 &attr->branch_sample_type);
			} else
				perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_EVENTS:
			evsel->max_events = term->val.max_events;
			break;
		case PERF_EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * perf_evsel__config. If the user explicitly set
			 * inherit using config terms, override the global
			 * opt->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
			break;
		case PERF_EVSEL__CONFIG_TERM_PERCORE:
			break;
		case PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT:
			attr->aux_output = term->val.aux_output ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
			/* Already applied by auxtrace */
			break;
		case PERF_EVSEL__CONFIG_TERM_CFG_CHG:
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		bool sample_address = false;

		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
				if (param.record_mode == CALLCHAIN_DWARF)
					sample_address = true;
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			perf_evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled) {
			if (sample_address) {
				perf_evsel__set_sample_bit(evsel, ADDR);
				perf_evsel__set_sample_bit(evsel, DATA_SRC);
				evsel->core.attr.mmap_data = track;
			}
			perf_evsel__config_callchain(evsel, opts, &param);
		}
	}
}
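
/*
 * For example (illustrative): an event specified as
 * "cycles/period=100000/" carries a PERF_EVSEL__CONFIG_TERM_PERIOD term,
 * so the loop above switches that evsel to a fixed period and clears
 * attr->freq, overriding the global default unless the term is weak and
 * the user gave an explicit -c/-F on the command line.
 */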

static bool is_dummy_event(struct evsel *evsel)
{
	return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
	       (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}

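/*
 * Note (illustrative): the lookup below deliberately keeps scanning and
 * returns the last matching term, so when the same term appears more
 * than once the later occurrence wins.
 */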
struct perf_evsel_config_term *__perf_evsel__get_config_term(struct evsel *evsel,
							     enum evsel_term_type type)
{
	struct perf_evsel_config_term *term, *found_term = NULL;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type == type)
			found_term = term;
	}

	return found_term;
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *     - all independent events and group leaders have
 *       enable_on_exec set
 *     - we don't specifically enable or disable any event during
 *       the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *     - we specifically enable or disable all events during
 *       the record command
 *
 *     When attaching events to an already running traced program we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
			struct callchain_param *callchain)
{
	struct evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->core.attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->write_backward = opts->overwrite ? 1 : 0;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in the case of a single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply the group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->core.nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {