! test_lexer_simple.f90 (web-viewer header and pasted line numbers removed)
! ==============================================================================
! Simplified test program for Phase 6 - Lexer module with memory pooling
!
! Exercises the string_pool / memory_dashboard pair the way the lexer will:
! token-value strings of varied sizes, an input buffer, temporary tokenization
! buffers, a 100-token array, and a 1000-cycle stress loop, then verifies that
! the pool reports zero live strings (no leaks).
! ==============================================================================
program test_lexer_simple
   use string_pool
   use memory_dashboard
   use shell_types
   use iso_fortran_env, only: output_unit
   implicit none

   type(string_ref) :: token_ref, word_ref, str_literal_ref, var_ref
   type(string_ref), allocatable :: token_refs(:)
   integer :: i
   logical :: test_passed
   integer :: total_allocs, total_deallocs, current_strings, peak_strings
   ! Snapshot of a ref's length/bucket taken BEFORE release (see release loops).
   integer :: released_len, released_bucket
   real :: hit_rate
   character(:), pointer :: str_ptr

   test_passed = .true.

   print *, "=== Phase 6 Lexer Memory Pooling Test (Simplified) ==="
   print *, "Testing pooled memory for tokenization"
   print *

   ! Initialize the pool and dashboard
   call pool_init()
   call dashboard_init(verbose=.false.)

   ! Test 1: Token value strings
   print *, "Test 1: Testing token value string allocation..."

   ! Simulate creating tokens with different value sizes
   allocate(token_refs(5))

   ! Small token (operator)
   token_refs(1) = pool_get_string(2) ! "||"
   call dashboard_track_allocation(MOD_LEXER, 2, 1)
   call pool_copy_to_ref(token_refs(1), "||")

   ! Medium token (keyword)
   token_refs(2) = pool_get_string(8) ! "function"
   call dashboard_track_allocation(MOD_LEXER, 8, 1)
   call pool_copy_to_ref(token_refs(2), "function")

   ! Word token
   token_refs(3) = pool_get_string(64) ! typical command name
   call dashboard_track_allocation(MOD_LEXER, 64, 1)
   call pool_copy_to_ref(token_refs(3), "execute_command_with_long_name")

   ! String literal
   token_refs(4) = pool_get_string(256) ! quoted string
   call dashboard_track_allocation(MOD_LEXER, 256, 2)
   call pool_copy_to_ref(token_refs(4), "This is a longer string literal with spaces and special chars")

   ! Variable name
   token_refs(5) = pool_get_string(32) ! $VARIABLE
   call dashboard_track_allocation(MOD_LEXER, 32, 1)
   call pool_copy_to_ref(token_refs(5), "PATH_TO_EXECUTABLE")

   print *, " Created 5 token value strings:"
   print *, " Token 1 (2B):", trim(token_refs(1)%data)
   print *, " Token 2 (8B):", trim(token_refs(2)%data)
   print *, " Token 3 (64B):", trim(token_refs(3)%data(1:30)), "..."
   print *, " Token 4 (256B):", trim(token_refs(4)%data(1:40)), "..."
   print *, " Token 5 (32B):", trim(token_refs(5)%data)

   ! Verify pooling: pool_index /= 0 marks a pool-backed (not direct) allocation
   if (token_refs(1)%pool_index /= 0 .and. token_refs(4)%pool_index /= 0) then
      print *, " PASSED: Token values allocated from pool"
   else
      print *, " FAILED: Token values not from pool"
      test_passed = .false.
   end if

   ! Release token strings.
   ! BUG FIX: capture str_len (and its bucket) BEFORE releasing the ref —
   ! pool_release_string may reset the reference, so reading
   ! token_refs(i)%str_len afterwards is a use-after-release and can feed
   ! the dashboard bogus sizes/buckets.
   do i = 1, 5
      released_len = token_refs(i)%str_len
      released_bucket = get_bucket_for_size(released_len)
      call pool_release_string(token_refs(i))
      call dashboard_track_deallocation(MOD_LEXER, released_len, released_bucket)
   end do
   deallocate(token_refs)

   ! Test 2: Input buffer pooling
   print *, ""
   print *, "Test 2: Testing input buffer pooling..."

   ! Simulate input string for lexer (typical command line)
   word_ref = pool_get_string(512)
   call dashboard_track_allocation(MOD_LEXER, 512, 3)
   call pool_copy_to_ref(word_ref, &
      "if [ $? -eq 0 ]; then echo 'Success' | tee output.log; else echo 'Failed'; fi")

   str_ptr => word_ref%data
   if (associated(str_ptr)) then
      print *, " Input buffer (512B) allocated"
      print *, " Content:", trim(str_ptr(1:50)), "..."
      print *, " PASSED: Input buffer working"
   end if

   call pool_release_string(word_ref)
   call dashboard_track_deallocation(MOD_LEXER, 512, 3)

   ! Test 3: Temporary string buffers during tokenization
   print *, ""
   print *, "Test 3: Testing temporary tokenization buffers..."

   ! Simulate temporary buffers used during read_word, read_string, read_variable
   word_ref = pool_get_string(256)         ! For word reading
   str_literal_ref = pool_get_string(1024) ! For string literal reading
   var_ref = pool_get_string(64)           ! For variable name reading

   call dashboard_track_allocation(MOD_LEXER, 256, 2)
   call dashboard_track_allocation(MOD_LEXER, 1024, 3)
   call dashboard_track_allocation(MOD_LEXER, 64, 1)

   call pool_copy_to_ref(word_ref, "temporary_word_buffer")
   call pool_copy_to_ref(str_literal_ref, "temporary string buffer for quoted literals")
   call pool_copy_to_ref(var_ref, "TEMP_VAR")

   print *, " Allocated 3 temporary buffers:"
   print *, " Word buffer (256B):", trim(word_ref%data)
   print *, " String buffer (1024B):", trim(str_literal_ref%data)
   print *, " Variable buffer (64B):", trim(var_ref%data)

   call pool_release_string(word_ref)
   call pool_release_string(str_literal_ref)
   call pool_release_string(var_ref)

   call dashboard_track_deallocation(MOD_LEXER, 256, 2)
   call dashboard_track_deallocation(MOD_LEXER, 1024, 3)
   call dashboard_track_deallocation(MOD_LEXER, 64, 1)

   print *, " Released all temporary buffers"

   ! Test 4: Token array simulation (many small allocations)
   print *, ""
   print *, "Test 4: Simulating token array with 100 tokens..."

   allocate(token_refs(100))
   do i = 1, 100
      ! Vary token sizes to simulate real tokenization
      select case(mod(i, 4))
      case(0) ! Small operator/keyword
         token_refs(i) = pool_get_string(8)
         call dashboard_track_allocation(MOD_LEXER, 8, 1)
         call pool_copy_to_ref(token_refs(i), "keyword")
      case(1) ! Medium word
         token_refs(i) = pool_get_string(32)
         call dashboard_track_allocation(MOD_LEXER, 32, 1)
         call pool_copy_to_ref(token_refs(i), "command_name")
      case(2) ! Larger string
         token_refs(i) = pool_get_string(128)
         call dashboard_track_allocation(MOD_LEXER, 128, 2)
         call pool_copy_to_ref(token_refs(i), "longer_string_value")
      case(3) ! Variable
         token_refs(i) = pool_get_string(16)
         call dashboard_track_allocation(MOD_LEXER, 16, 1)
         call pool_copy_to_ref(token_refs(i), "VAR")
      end select
   end do

   print *, " Created 100 tokens with varied sizes"
   print *, " Sample token 1:", trim(token_refs(1)%data)
   print *, " Sample token 50:", trim(token_refs(50)%data)
   print *, " Sample token 100:", trim(token_refs(100)%data)

   ! Release all tokens — same BUG FIX as Test 1: read the length before
   ! the ref is released, never after.
   do i = 1, 100
      released_len = token_refs(i)%str_len
      released_bucket = get_bucket_for_size(released_len)
      call pool_release_string(token_refs(i))
      call dashboard_track_deallocation(MOD_LEXER, released_len, released_bucket)
   end do
   deallocate(token_refs)

   print *, " Released all 100 tokens"

   ! Test 5: Stress test - rapid tokenization cycles
   print *, ""
   print *, "Test 5: Stress testing with 1000 tokenization cycles..."
   do i = 1, 1000
      ! Simulate a tokenization cycle
      token_ref = pool_get_string(64)  ! Token value
      word_ref = pool_get_string(256)  ! Temp buffer

      call dashboard_track_allocation(MOD_LEXER, 64, 1)
      call dashboard_track_allocation(MOD_LEXER, 256, 2)

      ! Simulate some work
      call pool_copy_to_ref(token_ref, "token")
      call pool_copy_to_ref(word_ref, "buffer")

      ! Release
      call pool_release_string(token_ref)
      call pool_release_string(word_ref)

      call dashboard_track_deallocation(MOD_LEXER, 64, 1)
      call dashboard_track_deallocation(MOD_LEXER, 256, 2)
   end do
   print *, " Completed 1000 tokenization cycles"

   ! Test 6: Check for memory leaks
   print *, ""
   print *, "Test 6: Checking for memory leaks..."
   call pool_statistics(total_allocs, total_deallocs, current_strings, peak_strings, hit_rate)

   print *, " Total allocations:", total_allocs
   print *, " Total deallocations:", total_deallocs
   print *, " Current strings:", current_strings
   print *, " Peak strings:", peak_strings
   print *, " Cache hit rate:", int(hit_rate * 100), "%"

   if (current_strings == 0) then
      print *, " PASSED: No memory leaks"
   else
      print *, " FAILED: Memory leak -", current_strings, "strings still allocated"
      test_passed = .false.
   end if

   ! Display dashboard
   print *, ""
   print *, "=== Lexer Module Statistics ==="
   call dashboard_display(detailed=.false.)

   ! Export statistics
   call dashboard_export_csv("lexer_pooling_test.csv")
   print *, ""
   print *, "Statistics exported to lexer_pooling_test.csv"

   ! Clean up
   call dashboard_cleanup()
   call pool_cleanup()

   ! Summary (statistics were snapshotted before cleanup, so locals are valid)
   print *, ""
   print *, "=== Test Summary ==="
   if (test_passed .and. current_strings == 0) then
      print *, "ALL TESTS PASSED"
      print *, ""
      print *, "Lexer pooling integration verified:"
      print *, " - Token value strings (2-256 bytes) working"
      print *, " - Input buffer (512 bytes) working"
      print *, " - Temporary buffers (64-1024 bytes) working"
      print *, " - Token array management (100 tokens) working"
      print *, " - No memory leaks detected"
      print *, " - Dashboard tracking successful"
      print *, " - Cache hit rate:", int(hit_rate * 100), "%"
      print *, ""
      print *, "Ready to integrate into production lexer module!"
   else
      print *, "SOME TESTS FAILED"
      if (current_strings > 0) then
         print *, " Memory leak:", current_strings, "strings not released"
      end if
   end if

contains

   !> Map a string size in bytes to the pool bucket index used by the
   !> dashboard: 1..5 for the pooled size classes (<=64, <=256, <=1024,
   !> <=4096, <=16384), 0 for direct (unpooled) allocation.
   function get_bucket_for_size(size_bytes) result(bucket_idx)
      integer, intent(in) :: size_bytes
      integer :: bucket_idx

      if (size_bytes <= 64) then
         bucket_idx = 1
      else if (size_bytes <= 256) then
         bucket_idx = 2
      else if (size_bytes <= 1024) then
         bucket_idx = 3
      else if (size_bytes <= 4096) then
         bucket_idx = 4
      else if (size_bytes <= 16384) then
         bucket_idx = 5
      else
         bucket_idx = 0 ! Direct allocation
      end if
   end function get_bucket_for_size

end program test_lexer_simple