blob: 2603e1f917112e3a9e2f0b2c93bd6d5800f5ad7d [file] [log] [blame]
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include <dirent.h>
18#include <errno.h>
19#include <pthread.h>
20#include <signal.h>
21#include <stdbool.h>
22#include <stdio.h>
23#include <stdlib.h>
24#include <string.h>
25#include <sys/ptrace.h>
26#include <sys/types.h>
27#include <sys/wait.h>
28#include <time.h>
29#include <unistd.h>
30
31#include <backtrace/backtrace.h>
32
33#include <cutils/atomic.h>
34#include <gtest/gtest.h>
35
36#include <vector>
37
38#include "thread_utils.h"
39
40// Number of microseconds per milliseconds.
41#define US_PER_MSEC 1000
42
43// Number of nanoseconds in a second.
44#define NS_PER_SEC 1000000000ULL
45
46// Number of simultaneous dumping operations to perform.
47#define NUM_THREADS 20
48
49// Number of simultaneous threads running in our forked process.
50#define NUM_PTRACE_THREADS 5
51
// Bookkeeping for one test thread, shared between the thread and the
// test body that observes it.
typedef struct {
  pid_t tid;            // Kernel thread id, published by the thread via gettid().
  int32_t state;        // Handshake flag, accessed with android_atomic_* ops.
  pthread_t threadId;   // pthread handle used to create the thread.
} thread_t;
57
// State for one dumper thread in the thread_multiple_dump test.
typedef struct {
  thread_t thread;              // Runner thread whose stack gets dumped.
  backtrace_context_t context;  // Filled in by the dump, verified by the test body.
  int32_t* now;                 // Shared "go" flag: the dump starts when non-zero.
  int32_t done;                 // Set to 1 when this dumper has finished its unwind.
} dump_thread_t;
64
extern "C" {
// Prototypes for functions in the test library.
// Descends through test_level_one..test_level_four, then calls the callback
// (when non-NULL) with the given argument.
int test_level_one(int, int, int, int, void (*)(void*), void*);

// Recurses the given number of times, then calls the callback (when non-NULL).
int test_recursive_call(int, void (*)(void*), void*);
}
71
72uint64_t NanoTime() {
73 struct timespec t = { 0, 0 };
74 clock_gettime(CLOCK_MONOTONIC, &t);
75 return static_cast<uint64_t>(t.tv_sec * NS_PER_SEC + t.tv_nsec);
76}
77
78void DumpFrames(const backtrace_context_t* context) {
79 if (context->backtrace->num_frames == 0) {
80 printf(" No frames to dump\n");
81 } else {
82 char line[512];
83 for (size_t i = 0; i < context->backtrace->num_frames; i++) {
84 backtrace_format_frame_data(context, i, line, sizeof(line));
85 printf(" %s\n", line);
86 }
87 }
88}
89
90void WaitForStop(pid_t pid) {
91 uint64_t start = NanoTime();
92
93 siginfo_t si;
94 while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) {
95 if ((NanoTime() - start) > NS_PER_SEC) {
96 printf("The process did not get to a stopping point in 1 second.\n");
97 break;
98 }
99 usleep(US_PER_MSEC);
100 }
101}
102
103bool ReadyLevelBacktrace(const backtrace_t* backtrace) {
104 // See if test_level_four is in the backtrace.
105 bool found = false;
106 for (size_t i = 0; i < backtrace->num_frames; i++) {
107 if (backtrace->frames[i].func_name != NULL &&
108 strcmp(backtrace->frames[i].func_name, "test_level_four") == 0) {
109 found = true;
110 break;
111 }
112 }
113
114 return found;
115}
116
// Asserts that the backtrace contains the test_level_one..four call chain
// in the expected innermost-first order.
void VerifyLevelDump(const backtrace_t* backtrace) {
  ASSERT_GT(backtrace->num_frames, static_cast<size_t>(0));
  ASSERT_LT(backtrace->num_frames, static_cast<size_t>(MAX_BACKTRACE_FRAMES));

  // Look through the frames starting at the highest to find the
  // frame we want. Stop at index 3 so a match always has three lower
  // frames (test_level_two..four) to check below it.
  size_t frame_num = 0;
  for (size_t i = backtrace->num_frames-1; i > 2; i--) {
    if (backtrace->frames[i].func_name != NULL &&
        strcmp(backtrace->frames[i].func_name, "test_level_one") == 0) {
      frame_num = i;
      break;
    }
  }
  // frame_num stays 0 if test_level_one was never found.
  ASSERT_GT(frame_num, static_cast<size_t>(0));

  // The three frames below test_level_one must be its nested callees,
  // innermost (test_level_four) at the lowest index.
  ASSERT_TRUE(NULL != backtrace->frames[frame_num].func_name);
  ASSERT_STREQ(backtrace->frames[frame_num].func_name, "test_level_one");
  ASSERT_TRUE(NULL != backtrace->frames[frame_num-1].func_name);
  ASSERT_STREQ(backtrace->frames[frame_num-1].func_name, "test_level_two");
  ASSERT_TRUE(NULL != backtrace->frames[frame_num-2].func_name);
  ASSERT_STREQ(backtrace->frames[frame_num-2].func_name, "test_level_three");
  ASSERT_TRUE(NULL != backtrace->frames[frame_num-3].func_name);
  ASSERT_STREQ(backtrace->frames[frame_num-3].func_name, "test_level_four");
}
142
143void VerifyLevelBacktrace(void*) {
144 backtrace_context_t context;
145
Christopher Ferris7f081ec2013-11-05 11:38:05 -0800146 ASSERT_TRUE(backtrace_create_context(&context, BACKTRACE_CURRENT_PROCESS, BACKTRACE_NO_TID, 0));
Christopher Ferris17e91d42013-10-21 13:30:52 -0700147
148 VerifyLevelDump(context.backtrace);
149
150 backtrace_destroy_context(&context);
151}
152
153bool ReadyMaxBacktrace(const backtrace_t* backtrace) {
154 return (backtrace->num_frames == MAX_BACKTRACE_FRAMES);
155}
156
// Asserts that a maximally-deep backtrace is exactly MAX_BACKTRACE_FRAMES
// long and that its outermost visible frame is the recursive test call.
void VerifyMaxDump(const backtrace_t* backtrace) {
  ASSERT_EQ(backtrace->num_frames, static_cast<size_t>(MAX_BACKTRACE_FRAMES));
  // Verify that the last frame is our recursive call.
  ASSERT_TRUE(NULL != backtrace->frames[MAX_BACKTRACE_FRAMES-1].func_name);
  ASSERT_STREQ(backtrace->frames[MAX_BACKTRACE_FRAMES-1].func_name,
               "test_recursive_call");
}
164
165void VerifyMaxBacktrace(void*) {
166 backtrace_context_t context;
167
Christopher Ferris7f081ec2013-11-05 11:38:05 -0800168 ASSERT_TRUE(backtrace_create_context(&context, BACKTRACE_CURRENT_PROCESS, BACKTRACE_NO_TID, 0));
Christopher Ferris17e91d42013-10-21 13:30:52 -0700169
170 VerifyMaxDump(context.backtrace);
171
172 backtrace_destroy_context(&context);
173}
174
175void ThreadSetState(void* data) {
176 thread_t* thread = reinterpret_cast<thread_t*>(data);
177 android_atomic_acquire_store(1, &thread->state);
178 volatile int i = 0;
179 while (thread->state) {
180 i++;
181 }
182}
183
184void VerifyThreadTest(pid_t tid, void (*VerifyFunc)(const backtrace_t*)) {
185 backtrace_context_t context;
186
187 backtrace_create_context(&context, getpid(), tid, 0);
188
189 VerifyFunc(context.backtrace);
190
191 backtrace_destroy_context(&context);
192}
193
194bool WaitForNonZero(int32_t* value, uint64_t seconds) {
195 uint64_t start = NanoTime();
196 do {
197 if (android_atomic_acquire_load(value)) {
198 return true;
199 }
200 } while ((NanoTime() - start) < seconds * NS_PER_SEC);
201 return false;
202}
203
// Unwind the current thread from the bottom of a four-level call chain.
TEST(libbacktrace, local_trace) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelBacktrace, NULL), 0);
}
207
// Compares three traces of the same stack taken with 0, 1 and 2 ignored
// frames: the ignored traces must be exactly one/two frames shorter and
// agree frame-for-frame after the offset. When cur_proc is non-NULL, the
// comparison only begins once that function is reached, because the frames
// above it differ between the three separate unwind calls.
void VerifyIgnoreFrames(
    const backtrace_t* bt_all, const backtrace_t* bt_ign1,
    const backtrace_t* bt_ign2, const char* cur_proc) {
  EXPECT_EQ(bt_all->num_frames, bt_ign1->num_frames + 1);
  EXPECT_EQ(bt_all->num_frames, bt_ign2->num_frames + 2);

  // Check all of the frames are the same > the current frame.
  bool check = (cur_proc == NULL);
  for (size_t i = 0; i < bt_ign2->num_frames; i++) {
    if (check) {
      // bt_ign2 frame i corresponds to bt_ign1 frame i+1 and bt_all
      // frame i+2, since each ignore level drops one innermost frame.
      EXPECT_EQ(bt_ign2->frames[i].pc, bt_ign1->frames[i+1].pc);
      EXPECT_EQ(bt_ign2->frames[i].sp, bt_ign1->frames[i+1].sp);
      EXPECT_EQ(bt_ign2->frames[i].stack_size, bt_ign1->frames[i+1].stack_size);

      EXPECT_EQ(bt_ign2->frames[i].pc, bt_all->frames[i+2].pc);
      EXPECT_EQ(bt_ign2->frames[i].sp, bt_all->frames[i+2].sp);
      EXPECT_EQ(bt_ign2->frames[i].stack_size, bt_all->frames[i+2].stack_size);
    }
    // Start checking from the frame after cur_proc is first seen.
    if (!check && bt_ign2->frames[i].func_name &&
        strcmp(bt_ign2->frames[i].func_name, cur_proc) == 0) {
      check = true;
    }
  }
}
232
// Unwinds the current thread three times with 0, 1 and 2 ignored frames
// and checks the traces are consistent with each other.
void VerifyLevelIgnoreFrames(void*) {
  backtrace_context_t all;
  ASSERT_TRUE(backtrace_create_context(&all, BACKTRACE_CURRENT_PROCESS, BACKTRACE_NO_TID, 0));
  ASSERT_TRUE(all.backtrace != NULL);

  backtrace_context_t ign1;
  ASSERT_TRUE(backtrace_create_context(&ign1, BACKTRACE_CURRENT_PROCESS, BACKTRACE_NO_TID, 1));
  ASSERT_TRUE(ign1.backtrace != NULL);

  backtrace_context_t ign2;
  ASSERT_TRUE(backtrace_create_context(&ign2, BACKTRACE_CURRENT_PROCESS, BACKTRACE_NO_TID, 2));
  ASSERT_TRUE(ign2.backtrace != NULL);

  // Since all three traces were taken from this function, comparison must
  // start at this function's own frame.
  VerifyIgnoreFrames(all.backtrace, ign1.backtrace, ign2.backtrace,
                     "VerifyLevelIgnoreFrames");

  backtrace_destroy_context(&all);
  backtrace_destroy_context(&ign1);
  backtrace_destroy_context(&ign2);
}
253
// Check the ignore-frames option for a local (current thread) unwind.
TEST(libbacktrace, local_trace_ignore_frames) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelIgnoreFrames, NULL), 0);
}
257
// Recurse past the frame limit and check the local unwind saturates
// at MAX_BACKTRACE_FRAMES.
TEST(libbacktrace, local_max_trace) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxBacktrace, NULL), 0);
}
261
// Repeatedly ptrace-attaches to a remote process/thread, unwinds it, and
// runs VerifyFunc once ReadyFunc says the target has reached the expected
// point in its execution. Retries for up to 5 seconds.
void VerifyProcTest(pid_t pid, pid_t tid,
                    bool (*ReadyFunc)(const backtrace_t*),
                    void (*VerifyFunc)(const backtrace_t*)) {
  // Attach to the specific thread when one is given, otherwise the process.
  pid_t ptrace_tid;
  if (tid < 0) {
    ptrace_tid = pid;
  } else {
    ptrace_tid = tid;
  }
  uint64_t start = NanoTime();
  bool verified = false;
  do {
    usleep(US_PER_MSEC);
    if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) {
      // Wait for the process to get to a stopping point.
      WaitForStop(ptrace_tid);

      backtrace_context_t context;
      ASSERT_TRUE(backtrace_create_context(&context, pid, tid, 0));
      // Only verify once the target is where we expect it; otherwise
      // detach and try again so it can run further.
      if (ReadyFunc(context.backtrace)) {
        VerifyFunc(context.backtrace);
        verified = true;
      }
      backtrace_destroy_context(&context);
      ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0);
    }
    // If 5 seconds have passed, then we are done.
  } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC);
  ASSERT_TRUE(verified);
}
292
// Fork a child running the four-level call chain and unwind it remotely
// via ptrace.
TEST(libbacktrace, ptrace_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    // Child: descend the call chain; test_level_one only returns if the
    // forked copy is killed, so exit(1) is effectively unreachable.
    ASSERT_NE(test_level_one(1, 2, 3, 4, NULL, NULL), 0);
    exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_NO_TID, ReadyLevelBacktrace, VerifyLevelDump);

  // Clean up the child and reap it to avoid a zombie.
  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
305
// Fork a child recursing past the frame limit and check the remote unwind
// saturates at MAX_BACKTRACE_FRAMES.
TEST(libbacktrace, ptrace_max_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, NULL, NULL), 0);
    exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_NO_TID, ReadyMaxBacktrace, VerifyMaxDump);

  // Clean up the child and reap it to avoid a zombie.
  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
318
// Given an already-captured remote trace with no ignored frames, captures
// two more with 1 and 2 ignored frames and checks they are consistent.
void VerifyProcessIgnoreFrames(const backtrace_t* bt_all) {
  pid_t pid = bt_all->pid;

  backtrace_context_t ign1;
  ASSERT_TRUE(backtrace_create_context(&ign1, pid, BACKTRACE_NO_TID, 1));
  ASSERT_TRUE(ign1.backtrace != NULL);

  backtrace_context_t ign2;
  ASSERT_TRUE(backtrace_create_context(&ign2, pid, BACKTRACE_NO_TID, 2));
  ASSERT_TRUE(ign2.backtrace != NULL);

  // cur_proc is NULL: the remote process is stopped, so all three traces
  // can be compared from frame 0.
  VerifyIgnoreFrames(bt_all, ign1.backtrace, ign2.backtrace, NULL);

  backtrace_destroy_context(&ign1);
  backtrace_destroy_context(&ign2);
}
335
// Check the ignore-frames option for a remote (ptrace) unwind.
TEST(libbacktrace, ptrace_ignore_frames) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, NULL, NULL), 0);
    exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_NO_TID, ReadyLevelBacktrace, VerifyProcessIgnoreFrames);

  // Clean up the child and reap it to avoid a zombie.
  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
348
349// Create a process with multiple threads and dump all of the threads.
350void* PtraceThreadLevelRun(void*) {
351 EXPECT_NE(test_level_one(1, 2, 3, 4, NULL, NULL), 0);
352 return NULL;
353}
354
355void GetThreads(pid_t pid, std::vector<pid_t>* threads) {
356 // Get the list of tasks.
357 char task_path[128];
358 snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid);
359
360 DIR* tasks_dir = opendir(task_path);
361 ASSERT_TRUE(tasks_dir != NULL);
362 struct dirent* entry;
363 while ((entry = readdir(tasks_dir)) != NULL) {
364 char* end;
365 pid_t tid = strtoul(entry->d_name, &end, 10);
366 if (*end == '\0') {
367 threads->push_back(tid);
368 }
369 }
370 closedir(tasks_dir);
371}
372
373TEST(libbacktrace, ptrace_threads) {
374 pid_t pid;
375 if ((pid = fork()) == 0) {
376 for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) {
377 pthread_attr_t attr;
378 pthread_attr_init(&attr);
379 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
380
381 pthread_t thread;
382 ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, NULL) == 0);
383 }
384 ASSERT_NE(test_level_one(1, 2, 3, 4, NULL, NULL), 0);
385 exit(1);
386 }
387
388 // Check to see that all of the threads are running before unwinding.
389 std::vector<pid_t> threads;
390 uint64_t start = NanoTime();
391 do {
392 usleep(US_PER_MSEC);
393 threads.clear();
394 GetThreads(pid, &threads);
395 } while ((threads.size() != NUM_PTRACE_THREADS + 1) &&
396 ((NanoTime() - start) <= 5 * NS_PER_SEC));
397 ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1));
398
399 ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
400 WaitForStop(pid);
401 for (std::vector<int>::const_iterator it = threads.begin(); it != threads.end(); ++it) {
402 // Skip the current forked process, we only care about the threads.
403 if (pid == *it) {
404 continue;
405 }
406 VerifyProcTest(pid, *it, ReadyLevelBacktrace, VerifyLevelDump);
407 }
408 ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
409
410 kill(pid, SIGKILL);
411 int status;
412 ASSERT_EQ(waitpid(pid, &status, 0), pid);
413}
414
415void VerifyLevelThread(void*) {
416 backtrace_context_t context;
417
418 ASSERT_TRUE(backtrace_create_context(&context, getpid(), gettid(), 0));
419
420 VerifyLevelDump(context.backtrace);
421
422 backtrace_destroy_context(&context);
423}
424
// Unwind the current thread using the thread (pid + tid) code path.
TEST(libbacktrace, thread_current_level) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelThread, NULL), 0);
}
428
429void VerifyMaxThread(void*) {
430 backtrace_context_t context;
431
432 ASSERT_TRUE(backtrace_create_context(&context, getpid(), gettid(), 0));
433
434 VerifyMaxDump(context.backtrace);
435
436 backtrace_destroy_context(&context);
437}
438
// Recurse past the frame limit and unwind via the thread code path.
TEST(libbacktrace, thread_current_max) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxThread, NULL), 0);
}
442
443void* ThreadLevelRun(void* data) {
444 thread_t* thread = reinterpret_cast<thread_t*>(data);
445
446 thread->tid = gettid();
447 EXPECT_NE(test_level_one(1, 2, 3, 4, ThreadSetState, data), 0);
448 return NULL;
449}
450
// Unwind another thread of this process while it spins at the bottom of
// the level calls, and check the signal handler used for the unwind is
// restored afterwards.
TEST(libbacktrace, thread_level_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0 };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  // Save the current signal action and make sure it is restored afterwards.
  // The thread unwind implementation presumably uses SIGURG internally —
  // NOTE(review): confirm against the library implementation.
  struct sigaction cur_action;
  ASSERT_TRUE(sigaction(SIGURG, NULL, &cur_action) == 0);

  backtrace_context_t context;

  ASSERT_TRUE(backtrace_create_context(&context, getpid(), thread_data.tid,0));

  VerifyLevelDump(context.backtrace);

  backtrace_destroy_context(&context);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  // Verify that the old action was restored.
  struct sigaction new_action;
  ASSERT_TRUE(sigaction(SIGURG, NULL, &new_action) == 0);
  EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction);
  EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags);
}
484
// Check the ignore-frames option when unwinding another thread of this
// process: take traces with 0, 1 and 2 ignored frames while the target
// thread spins, and verify they line up.
TEST(libbacktrace, thread_ignore_frames) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0 };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  backtrace_context_t all;
  ASSERT_TRUE(backtrace_create_context(&all, getpid(), thread_data.tid, 0));

  backtrace_context_t ign1;
  ASSERT_TRUE(backtrace_create_context(&ign1, getpid(), thread_data.tid, 1));

  backtrace_context_t ign2;
  ASSERT_TRUE(backtrace_create_context(&ign2, getpid(), thread_data.tid, 2));

  // cur_proc is NULL: the target thread is parked in its spin loop, so all
  // three traces can be compared from frame 0.
  VerifyIgnoreFrames(all.backtrace, ign1.backtrace, ign2.backtrace, NULL);

  backtrace_destroy_context(&all);
  backtrace_destroy_context(&ign1);
  backtrace_destroy_context(&ign2);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}
515
516void* ThreadMaxRun(void* data) {
517 thread_t* thread = reinterpret_cast<thread_t*>(data);
518
519 thread->tid = gettid();
520 EXPECT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, ThreadSetState, data), 0);
521 return NULL;
522}
523
// Unwind another thread of this process while it spins at the bottom of a
// recursion deep enough to saturate MAX_BACKTRACE_FRAMES.
TEST(libbacktrace, thread_max_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0 };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &thread_data) == 0);

  // Wait for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  backtrace_context_t context;

  ASSERT_TRUE(backtrace_create_context(&context, getpid(), thread_data.tid, 0));

  VerifyMaxDump(context.backtrace);

  backtrace_destroy_context(&context);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}
547
// Dumper thread body: spins until the shared *dump->now flag goes non-zero
// (so all dumpers start together), then unwinds the runner thread recorded
// in dump->thread and signals completion via dump->done.
void* ThreadDump(void* data) {
  dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data);
  while (true) {
    if (android_atomic_acquire_load(dump->now)) {
      break;
    }
  }

  // Clear the context so the test body can tell whether the create ran.
  dump->context.data = NULL;
  dump->context.backtrace = NULL;

  // The status of the actual unwind will be checked elsewhere.
  backtrace_create_context(&dump->context, getpid(), dump->thread.tid, 0);

  android_atomic_acquire_store(1, &dump->done);

  return NULL;
}
566
// Start NUM_THREADS spinning runner threads, then NUM_THREADS dumper
// threads, release all dumpers simultaneously, and verify every
// simultaneous unwind produced a valid saturated trace.
TEST(libbacktrace, thread_multiple_dump) {
  // Dump NUM_THREADS simultaneously.
  std::vector<thread_t> runners(NUM_THREADS);
  std::vector<dump_thread_t> dumpers(NUM_THREADS);

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    // Launch the runners, they will spin in hard loops doing nothing.
    runners[i].tid = 0;
    runners[i].state = 0;
    ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0);
  }

  // Wait for tids to be set.
  for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) {
    ASSERT_TRUE(WaitForNonZero(&it->state, 10));
  }

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runners[i].tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 10));

    // Tell the runner thread to exit its infinite loop.
    android_atomic_acquire_store(0, &runners[i].state);

    ASSERT_TRUE(dumpers[i].context.backtrace != NULL);
    VerifyMaxDump(dumpers[i].context.backtrace);
    backtrace_destroy_context(&dumpers[i].context);
  }
}
613
// Exercise backtrace_format_frame_data by overwriting frame 1 with known
// values and checking the exact formatted string for each field combination
// (no data, pc+map, +func name, +func offset).
TEST(libbacktrace, format_test) {
  backtrace_context_t context;

  ASSERT_TRUE(backtrace_create_context(&context, BACKTRACE_CURRENT_PROCESS, BACKTRACE_NO_TID, 0));
  ASSERT_TRUE(context.backtrace != NULL);

  // Temporarily scribble over frame 1; the original is restored below so
  // destroy sees untouched data.
  backtrace_frame_data_t* frame = const_cast<backtrace_frame_data_t*>(&context.backtrace->frames[1]);
  backtrace_frame_data_t save_frame = *frame;

  // Zeroed frame: pc 0, no map, no function.
  memset(frame, 0, sizeof(backtrace_frame_data_t));
  char buf[512];
  backtrace_format_frame_data(&context, 1, buf, sizeof(buf));
#if defined(__LP64__)
  EXPECT_STREQ(buf, "#01 pc 0000000000000000 <unknown>");
#else
  EXPECT_STREQ(buf, "#01 pc 00000000 <unknown>");
#endif

  // pc plus map name.
  frame->pc = 0x12345678;
  frame->map_name = "MapFake";
  backtrace_format_frame_data(&context, 1, buf, sizeof(buf));
#if defined(__LP64__)
  EXPECT_STREQ(buf, "#01 pc 0000000012345678 MapFake");
#else
  EXPECT_STREQ(buf, "#01 pc 12345678 MapFake");
#endif

  // Plus a function name.
  frame->func_name = const_cast<char*>("ProcFake");
  backtrace_format_frame_data(&context, 1, buf, sizeof(buf));
#if defined(__LP64__)
  EXPECT_STREQ(buf, "#01 pc 0000000012345678 MapFake (ProcFake)");
#else
  EXPECT_STREQ(buf, "#01 pc 12345678 MapFake (ProcFake)");
#endif

  // Plus a function offset.
  frame->func_offset = 645;
  backtrace_format_frame_data(&context, 1, buf, sizeof(buf));
#if defined(__LP64__)
  EXPECT_STREQ(buf, "#01 pc 0000000012345678 MapFake (ProcFake+645)");
#else
  EXPECT_STREQ(buf, "#01 pc 12345678 MapFake (ProcFake+645)");
#endif

  // Restore the saved frame before tearing down the context.
  *frame = save_frame;

  backtrace_destroy_context(&context);
}