/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdlib.h>
#include <sys/mman.h>

#include <gtest/gtest.h>

#include <ion/ion.h>

#include "ion_test_fixture.h"

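// Each test below does its ION work inside a child process spawned by
// EXPECT_EXIT/ASSERT_EXIT and then calls exit(0) without freeing, closing, or
// unmapping anything; the only check is that the child still exits cleanly
// when all cleanup is left to process teardown.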
class Exit : public IonAllHeapsTest {
};

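// Allocate a buffer by handle in the child and exit while still holding it.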
TEST_F(Exit, WithAlloc)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT({
                ion_user_handle_t handle = 0;

                ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, 0, &handle));
                ASSERT_TRUE(handle != 0);
                exit(0);
            }, ::testing::ExitedWithCode(0), "");
        }
    }
}

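// Allocate via ion_alloc_fd() and exit with the returned fd still open.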
TEST_F(Exit, WithAllocFd)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT({
                int handle_fd = -1;

                ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, 0, &handle_fd));
                ASSERT_NE(-1, handle_fd);
                exit(0);
            }, ::testing::ExitedWithCode(0), "");
        }
    }
}

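// Repeat the fd allocation 1024 times per heap/size combination; if exit-time
// cleanup leaked, a later iteration would eventually fail to allocate.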
TEST_F(Exit, WithRepeatedAllocFd)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            for (unsigned int i = 0; i < 1024; i++) {
                SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
                SCOPED_TRACE(::testing::Message() << "size " << size);
                ASSERT_EXIT({
                    int handle_fd = -1;

                    ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, 0, &handle_fd));
                    ASSERT_NE(-1, handle_fd);
                    exit(0);
                }, ::testing::ExitedWithCode(0), "")
                        << "failed on heap " << heapMask
                        << " and size " << size
                        << " on iteration " << i;
            }
        }
    }
}

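// Allocate, mmap() the buffer, and exit with the mapping still in place.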
TEST_F(Exit, WithMapping)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT({
                int map_fd = -1;

                ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, 0, &map_fd));
                ASSERT_GE(map_fd, 0);

                void *ptr;
                ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                ASSERT_TRUE(ptr != MAP_FAILED);
                exit(0);
            }, ::testing::ExitedWithCode(0), "");
        }
    }
}

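// As above, but munmap() only the first half of the buffer before exiting,
// leaving a partial mapping behind.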
TEST_F(Exit, WithPartialMapping)
{
    static const size_t allocationSizes[] = {64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT({
                int map_fd = -1;

                ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, 0, &map_fd));
                ASSERT_GE(map_fd, 0);

                void *ptr;
                ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                ASSERT_TRUE(ptr != MAP_FAILED);

                ASSERT_EQ(0, munmap(ptr, size / 2));
                exit(0);
            }, ::testing::ExitedWithCode(0), "");
        }
    }
}

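// Same mapping test, with ION_FLAG_CACHED set on the allocation.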
TEST_F(Exit, WithMappingCached)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT({
                int map_fd = -1;

                ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED, &map_fd));
                ASSERT_GE(map_fd, 0);

                void *ptr;
                ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                ASSERT_TRUE(ptr != MAP_FAILED);
                exit(0);
            }, ::testing::ExitedWithCode(0), "");
        }
    }
}

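// Partial-unmap variant of the cached-mapping test.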
TEST_F(Exit, WithPartialMappingCached)
{
    static const size_t allocationSizes[] = {64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT({
                int map_fd = -1;

                ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED, &map_fd));
                ASSERT_GE(map_fd, 0);

                void *ptr;
                ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                ASSERT_TRUE(ptr != MAP_FAILED);

                ASSERT_EQ(0, munmap(ptr, size / 2));
                exit(0);
            }, ::testing::ExitedWithCode(0), "");
        }
    }
}

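// Mapping test with ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC set on the
// allocation; the child still exits without any explicit sync.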
TEST_F(Exit, WithMappingNeedsSync)
{
    static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT({
                int map_fd = -1;

                ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC, &map_fd));
                ASSERT_GE(map_fd, 0);

                void *ptr;
                ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                ASSERT_TRUE(ptr != MAP_FAILED);
                exit(0);
            }, ::testing::ExitedWithCode(0), "");
        }
    }
}

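// Partial-unmap variant of the needs-sync mapping test.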
TEST_F(Exit, WithPartialMappingNeedsSync)
{
    static const size_t allocationSizes[] = {64*1024, 1024*1024, 2*1024*1024};
    for (unsigned int heapMask : m_allHeaps) {
        for (size_t size : allocationSizes) {
            SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
            SCOPED_TRACE(::testing::Message() << "size " << size);
            EXPECT_EXIT({
                int map_fd = -1;

                ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC, &map_fd));
                ASSERT_GE(map_fd, 0);

                void *ptr;
                ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
                ASSERT_TRUE(ptr != MAP_FAILED);

                ASSERT_EQ(0, munmap(ptr, size / 2));
                exit(0);
            }, ::testing::ExitedWithCode(0), "");
        }
    }
}