/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <dirent.h>
#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <backtrace/backtrace.h>

#include <cutils/atomic.h>
#include <gtest/gtest.h>

#include <vector>

#include "thread_utils.h"

// Number of microseconds in a millisecond.
#define US_PER_MSEC 1000

// Number of nanoseconds in a second.
#define NS_PER_SEC 1000000000ULL

// Number of simultaneous dumping operations to perform.
#define NUM_THREADS 20

// Number of simultaneous threads running in our forked process.
#define NUM_PTRACE_THREADS 5

typedef struct {
  pid_t tid;
  int32_t state;
  pthread_t threadId;
} thread_t;

typedef struct {
  thread_t thread;
  backtrace_context_t context;
  int32_t* now;
  int32_t done;
} dump_thread_t;

extern "C" {
// Prototypes for functions in the test library.
int test_level_one(int, int, int, int, void (*)(void*), void*);

int test_recursive_call(int, void (*)(void*), void*);
}

uint64_t NanoTime() {
  struct timespec t = { 0, 0 };
  clock_gettime(CLOCK_MONOTONIC, &t);
  return static_cast<uint64_t>(t.tv_sec * NS_PER_SEC + t.tv_nsec);
}

void DumpFrames(const backtrace_context_t* context) {
  if (context->backtrace->num_frames == 0) {
    printf(" No frames to dump\n");
  } else {
    char line[512];
    for (size_t i = 0; i < context->backtrace->num_frames; i++) {
      backtrace_format_frame_data(context, i, line, sizeof(line));
      printf(" %s\n", line);
    }
  }
}

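// Poll with PTRACE_GETSIGINFO until the traced task has actually reached a
// stopped state, giving up after roughly one second.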
void WaitForStop(pid_t pid) {
  uint64_t start = NanoTime();

  siginfo_t si;
  while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) {
    if ((NanoTime() - start) > NS_PER_SEC) {
      printf("The process did not get to a stopping point in 1 second.\n");
      break;
    }
    usleep(US_PER_MSEC);
  }
}

bool ReadyLevelBacktrace(const backtrace_t* backtrace) {
  // See if test_level_four is in the backtrace.
  bool found = false;
  for (size_t i = 0; i < backtrace->num_frames; i++) {
    if (backtrace->frames[i].func_name != NULL &&
        strcmp(backtrace->frames[i].func_name, "test_level_four") == 0) {
      found = true;
      break;
    }
  }

  return found;
}

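// Search from the outermost frame inward for test_level_one, then verify
// that the three adjacent inner frames are test_level_two, test_level_three
// and test_level_four, in that order.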
void VerifyLevelDump(const backtrace_t* backtrace) {
  ASSERT_GT(backtrace->num_frames, static_cast<size_t>(0));
  ASSERT_LT(backtrace->num_frames, static_cast<size_t>(MAX_BACKTRACE_FRAMES));

  // Look through the frames starting at the highest to find the
  // frame we want.
  size_t frame_num = 0;
  for (size_t i = backtrace->num_frames-1; i > 2; i--) {
    if (backtrace->frames[i].func_name != NULL &&
        strcmp(backtrace->frames[i].func_name, "test_level_one") == 0) {
      frame_num = i;
      break;
    }
  }
  ASSERT_GT(frame_num, static_cast<size_t>(0));

  ASSERT_TRUE(NULL != backtrace->frames[frame_num].func_name);
  ASSERT_STREQ(backtrace->frames[frame_num].func_name, "test_level_one");
  ASSERT_TRUE(NULL != backtrace->frames[frame_num-1].func_name);
  ASSERT_STREQ(backtrace->frames[frame_num-1].func_name, "test_level_two");
  ASSERT_TRUE(NULL != backtrace->frames[frame_num-2].func_name);
  ASSERT_STREQ(backtrace->frames[frame_num-2].func_name, "test_level_three");
  ASSERT_TRUE(NULL != backtrace->frames[frame_num-3].func_name);
  ASSERT_STREQ(backtrace->frames[frame_num-3].func_name, "test_level_four");
}

void VerifyLevelBacktrace(void*) {
  backtrace_context_t context;

  ASSERT_TRUE(backtrace_create_context(&context, BACKTRACE_CURRENT_PROCESS,
                                       BACKTRACE_CURRENT_THREAD, 0));

  VerifyLevelDump(context.backtrace);

  backtrace_destroy_context(&context);
}

bool ReadyMaxBacktrace(const backtrace_t* backtrace) {
  return (backtrace->num_frames == MAX_BACKTRACE_FRAMES);
}

void VerifyMaxDump(const backtrace_t* backtrace) {
  ASSERT_EQ(backtrace->num_frames, static_cast<size_t>(MAX_BACKTRACE_FRAMES));
  // Verify that the last frame is our recursive call.
  ASSERT_TRUE(NULL != backtrace->frames[MAX_BACKTRACE_FRAMES-1].func_name);
  ASSERT_STREQ(backtrace->frames[MAX_BACKTRACE_FRAMES-1].func_name,
               "test_recursive_call");
}

void VerifyMaxBacktrace(void*) {
  backtrace_context_t context;

  ASSERT_TRUE(backtrace_create_context(&context, BACKTRACE_CURRENT_PROCESS,
                                       BACKTRACE_CURRENT_THREAD, 0));

  VerifyMaxDump(context.backtrace);

  backtrace_destroy_context(&context);
}

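// Runs on the target thread: publish that the thread is ready (state = 1),
// then spin until the test clears the flag, so the thread stays at a known
// point while it is being unwound.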
void ThreadSetState(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);
  android_atomic_acquire_store(1, &thread->state);
  volatile int i = 0;
  while (thread->state) {
    i++;
  }
}

void VerifyThreadTest(pid_t tid, void (*VerifyFunc)(const backtrace_t*)) {
  backtrace_context_t context;

  backtrace_create_context(&context, getpid(), tid, 0);

  VerifyFunc(context.backtrace);

  backtrace_destroy_context(&context);
}

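// Busy-poll an atomic flag until it becomes non-zero, or until the given
// number of seconds has elapsed. Returns true if the flag was set in time.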
bool WaitForNonZero(int32_t* value, uint64_t seconds) {
  uint64_t start = NanoTime();
  do {
    if (android_atomic_acquire_load(value)) {
      return true;
    }
  } while ((NanoTime() - start) < seconds * NS_PER_SEC);
  return false;
}

TEST(libbacktrace, local_trace) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelBacktrace, NULL), 0);
}

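// Each ignored frame drops exactly one frame from the top of the unwind:
//   bt_all:  [current] [caller] [frame A] [frame B] ...
//   bt_ign1:           [caller] [frame A] [frame B] ...
//   bt_ign2:                    [frame A] [frame B] ...
// so bt_ign2->frames[i] should match bt_ign1->frames[i+1] and
// bt_all->frames[i+2]. For local traces the comparison only starts after the
// cur_proc frame, since the pc within the current function differs between
// the three separate unwind calls.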
void VerifyIgnoreFrames(
    const backtrace_t* bt_all, const backtrace_t* bt_ign1,
    const backtrace_t* bt_ign2, const char* cur_proc) {
  EXPECT_EQ(bt_all->num_frames, bt_ign1->num_frames + 1);
  EXPECT_EQ(bt_all->num_frames, bt_ign2->num_frames + 2);

  // Check that all of the frames after the current frame are the same.
  bool check = (cur_proc == NULL);
  for (size_t i = 0; i < bt_ign2->num_frames; i++) {
    if (check) {
      EXPECT_EQ(bt_ign2->frames[i].pc, bt_ign1->frames[i+1].pc);
      EXPECT_EQ(bt_ign2->frames[i].sp, bt_ign1->frames[i+1].sp);
      EXPECT_EQ(bt_ign2->frames[i].stack_size, bt_ign1->frames[i+1].stack_size);

      EXPECT_EQ(bt_ign2->frames[i].pc, bt_all->frames[i+2].pc);
      EXPECT_EQ(bt_ign2->frames[i].sp, bt_all->frames[i+2].sp);
      EXPECT_EQ(bt_ign2->frames[i].stack_size, bt_all->frames[i+2].stack_size);
    }
    if (!check && bt_ign2->frames[i].func_name &&
        strcmp(bt_ign2->frames[i].func_name, cur_proc) == 0) {
      check = true;
    }
  }
}

void VerifyLevelIgnoreFrames(void*) {
  backtrace_context_t all;
  ASSERT_TRUE(backtrace_create_context(&all, BACKTRACE_CURRENT_PROCESS,
                                       BACKTRACE_CURRENT_THREAD, 0));
  ASSERT_TRUE(all.backtrace != NULL);

  backtrace_context_t ign1;
  ASSERT_TRUE(backtrace_create_context(&ign1, BACKTRACE_CURRENT_PROCESS,
                                       BACKTRACE_CURRENT_THREAD, 1));
  ASSERT_TRUE(ign1.backtrace != NULL);

  backtrace_context_t ign2;
  ASSERT_TRUE(backtrace_create_context(&ign2, BACKTRACE_CURRENT_PROCESS,
                                       BACKTRACE_CURRENT_THREAD, 2));
  ASSERT_TRUE(ign2.backtrace != NULL);

  VerifyIgnoreFrames(all.backtrace, ign1.backtrace, ign2.backtrace,
                     "VerifyLevelIgnoreFrames");

  backtrace_destroy_context(&all);
  backtrace_destroy_context(&ign1);
  backtrace_destroy_context(&ign2);
}

TEST(libbacktrace, local_trace_ignore_frames) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelIgnoreFrames, NULL), 0);
}

TEST(libbacktrace, local_max_trace) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxBacktrace, NULL), 0);
}

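// Repeatedly attach to the target with ptrace, wait for it to stop, unwind
// it, and detach again, until ReadyFunc reports that the target has reached
// the expected point (or five seconds elapse). Once ready, VerifyFunc checks
// the resulting backtrace.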
void VerifyProcTest(pid_t pid, pid_t tid,
                    bool (*ReadyFunc)(const backtrace_t*),
                    void (*VerifyFunc)(const backtrace_t*)) {
  pid_t ptrace_tid;
  if (tid < 0) {
    ptrace_tid = pid;
  } else {
    ptrace_tid = tid;
  }
  uint64_t start = NanoTime();
  bool verified = false;
  do {
    usleep(US_PER_MSEC);
    if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) {
      // Wait for the process to get to a stopping point.
      WaitForStop(ptrace_tid);

      backtrace_context_t context;
      ASSERT_TRUE(backtrace_create_context(&context, pid, tid, 0));
      if (ReadyFunc(context.backtrace)) {
        VerifyFunc(context.backtrace);
        verified = true;
      }
      backtrace_destroy_context(&context);
      ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0);
    }
    // If 5 seconds have passed, then we are done.
  } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC);
  ASSERT_TRUE(verified);
}

TEST(libbacktrace, ptrace_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, NULL, NULL), 0);
    exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, ReadyLevelBacktrace, VerifyLevelDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

TEST(libbacktrace, ptrace_max_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, NULL, NULL), 0);
    exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, ReadyMaxBacktrace, VerifyMaxDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

void VerifyProcessIgnoreFrames(const backtrace_t* bt_all) {
  pid_t pid = bt_all->pid;

  backtrace_context_t ign1;
  ASSERT_TRUE(backtrace_create_context(&ign1, pid, BACKTRACE_CURRENT_THREAD, 1));
  ASSERT_TRUE(ign1.backtrace != NULL);

  backtrace_context_t ign2;
  ASSERT_TRUE(backtrace_create_context(&ign2, pid, BACKTRACE_CURRENT_THREAD, 2));
  ASSERT_TRUE(ign2.backtrace != NULL);

  VerifyIgnoreFrames(bt_all, ign1.backtrace, ign2.backtrace, NULL);

  backtrace_destroy_context(&ign1);
  backtrace_destroy_context(&ign2);
}

TEST(libbacktrace, ptrace_ignore_frames) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, NULL, NULL), 0);
    exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, ReadyLevelBacktrace, VerifyProcessIgnoreFrames);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

// Create a process with multiple threads and dump all of the threads.
void* PtraceThreadLevelRun(void*) {
  EXPECT_NE(test_level_one(1, 2, 3, 4, NULL, NULL), 0);
  return NULL;
}

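// Enumerate all of the tids in the target process by reading the entries
// under /proc/<pid>/task.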
void GetThreads(pid_t pid, std::vector<pid_t>* threads) {
  // Get the list of tasks.
  char task_path[128];
  snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid);

  DIR* tasks_dir = opendir(task_path);
  ASSERT_TRUE(tasks_dir != NULL);
  struct dirent* entry;
  while ((entry = readdir(tasks_dir)) != NULL) {
    char* end;
    pid_t tid = strtoul(entry->d_name, &end, 10);
    if (*end == '\0') {
      threads->push_back(tid);
    }
  }
  closedir(tasks_dir);
}

TEST(libbacktrace, ptrace_threads) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

      pthread_t thread;
      ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, NULL) == 0);
    }
    ASSERT_NE(test_level_one(1, 2, 3, 4, NULL, NULL), 0);
    exit(1);
  }

  // Check to see that all of the threads are running before unwinding.
  std::vector<pid_t> threads;
  uint64_t start = NanoTime();
  do {
    usleep(US_PER_MSEC);
    threads.clear();
    GetThreads(pid, &threads);
  } while ((threads.size() != NUM_PTRACE_THREADS + 1) &&
           ((NanoTime() - start) <= 5 * NS_PER_SEC));
  ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1));

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
  WaitForStop(pid);
  for (std::vector<pid_t>::const_iterator it = threads.begin(); it != threads.end(); ++it) {
    // Skip the current forked process, we only care about the threads.
    if (pid == *it) {
      continue;
    }
    VerifyProcTest(pid, *it, ReadyLevelBacktrace, VerifyLevelDump);
  }
  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

void VerifyLevelThread(void*) {
  backtrace_context_t context;

  ASSERT_TRUE(backtrace_create_context(&context, getpid(), gettid(), 0));

  VerifyLevelDump(context.backtrace);

  backtrace_destroy_context(&context);
}

TEST(libbacktrace, thread_current_level) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelThread, NULL), 0);
}

void VerifyMaxThread(void*) {
  backtrace_context_t context;

  ASSERT_TRUE(backtrace_create_context(&context, getpid(), gettid(), 0));

  VerifyMaxDump(context.backtrace);

  backtrace_destroy_context(&context);
}

TEST(libbacktrace, thread_current_max) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxThread, NULL), 0);
}

void* ThreadLevelRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = gettid();
  EXPECT_NE(test_level_one(1, 2, 3, 4, ThreadSetState, data), 0);
  return NULL;
}

TEST(libbacktrace, thread_level_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0 };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  // Save the current signal action and make sure it is restored afterwards.
  struct sigaction cur_action;
  ASSERT_TRUE(sigaction(SIGURG, NULL, &cur_action) == 0);

  backtrace_context_t context;

  ASSERT_TRUE(backtrace_create_context(&context, getpid(), thread_data.tid, 0));

  VerifyLevelDump(context.backtrace);

  backtrace_destroy_context(&context);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  // Verify that the old action was restored.
  struct sigaction new_action;
  ASSERT_TRUE(sigaction(SIGURG, NULL, &new_action) == 0);
  EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction);
  EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags);
}

TEST(libbacktrace, thread_ignore_frames) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0 };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  backtrace_context_t all;
  ASSERT_TRUE(backtrace_create_context(&all, getpid(), thread_data.tid, 0));

  backtrace_context_t ign1;
  ASSERT_TRUE(backtrace_create_context(&ign1, getpid(), thread_data.tid, 1));

  backtrace_context_t ign2;
  ASSERT_TRUE(backtrace_create_context(&ign2, getpid(), thread_data.tid, 2));

  VerifyIgnoreFrames(all.backtrace, ign1.backtrace, ign2.backtrace, NULL);

  backtrace_destroy_context(&all);
  backtrace_destroy_context(&ign1);
  backtrace_destroy_context(&ign2);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}

void* ThreadMaxRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = gettid();
  EXPECT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, ThreadSetState, data), 0);
  return NULL;
}

TEST(libbacktrace, thread_max_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0 };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &thread_data) == 0);

  // Wait for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  backtrace_context_t context;

  ASSERT_TRUE(backtrace_create_context(&context, getpid(), thread_data.tid, 0));

  VerifyMaxDump(context.backtrace);

  backtrace_destroy_context(&context);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}

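// Each dumper thread spins on the shared "now" flag so that all of the
// dumpers start unwinding their target threads at nearly the same moment,
// then signals completion through "done".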
void* ThreadDump(void* data) {
  dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data);
  while (true) {
    if (android_atomic_acquire_load(dump->now)) {
      break;
    }
  }

  dump->context.data = NULL;
  dump->context.backtrace = NULL;

  // The status of the actual unwind will be checked elsewhere.
  backtrace_create_context(&dump->context, getpid(), dump->thread.tid, 0);

  android_atomic_acquire_store(1, &dump->done);

  return NULL;
}

TEST(libbacktrace, thread_multiple_dump) {
  // Dump NUM_THREADS simultaneously.
  std::vector<thread_t> runners(NUM_THREADS);
  std::vector<dump_thread_t> dumpers(NUM_THREADS);

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    // Launch the runners, they will spin in hard loops doing nothing.
    runners[i].tid = 0;
    runners[i].state = 0;
    ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0);
  }

  // Wait for tids to be set.
  for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) {
    ASSERT_TRUE(WaitForNonZero(&it->state, 10));
  }

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runners[i].tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 10));

    // Tell the runner thread to exit its infinite loop.
    android_atomic_acquire_store(0, &runners[i].state);

    ASSERT_TRUE(dumpers[i].context.backtrace != NULL);
    VerifyMaxDump(dumpers[i].context.backtrace);
    backtrace_destroy_context(&dumpers[i].context);
  }
}

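// Overwrite one frame of a live backtrace with known values and check the
// exact strings produced by backtrace_format_frame_data as the pc, map name,
// function name, and function offset are filled in one at a time.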
TEST(libbacktrace, format_test) {
  backtrace_context_t context;

  ASSERT_TRUE(backtrace_create_context(&context, BACKTRACE_CURRENT_PROCESS,
                                       BACKTRACE_CURRENT_THREAD, 0));
  ASSERT_TRUE(context.backtrace != NULL);

  backtrace_frame_data_t* frame =
      const_cast<backtrace_frame_data_t*>(&context.backtrace->frames[1]);
  backtrace_frame_data_t save_frame = *frame;

  memset(frame, 0, sizeof(backtrace_frame_data_t));
  char buf[512];
  backtrace_format_frame_data(&context, 1, buf, sizeof(buf));
#if defined(__LP64__)
  EXPECT_STREQ(buf, "#01 pc 0000000000000000 <unknown>");
#else
  EXPECT_STREQ(buf, "#01 pc 00000000 <unknown>");
#endif

  frame->pc = 0x12345678;
  frame->map_name = "MapFake";
  backtrace_format_frame_data(&context, 1, buf, sizeof(buf));
#if defined(__LP64__)
  EXPECT_STREQ(buf, "#01 pc 0000000012345678 MapFake");
#else
  EXPECT_STREQ(buf, "#01 pc 12345678 MapFake");
#endif

  frame->func_name = const_cast<char*>("ProcFake");
  backtrace_format_frame_data(&context, 1, buf, sizeof(buf));
#if defined(__LP64__)
  EXPECT_STREQ(buf, "#01 pc 0000000012345678 MapFake (ProcFake)");
#else
  EXPECT_STREQ(buf, "#01 pc 12345678 MapFake (ProcFake)");
#endif

  frame->func_offset = 645;
  backtrace_format_frame_data(&context, 1, buf, sizeof(buf));
#if defined(__LP64__)
  EXPECT_STREQ(buf, "#01 pc 0000000012345678 MapFake (ProcFake+645)");
#else
  EXPECT_STREQ(buf, "#01 pc 12345678 MapFake (ProcFake+645)");
#endif

  *frame = save_frame;

  backtrace_destroy_context(&context);
}