slvm/vm/call.rs

//! Vm functions to handle runtime calling of anything callable.

use std::sync::Arc;

use crate::{CallFrame, Chunk, Continuation, GVm, VMError, VMResult, Value, mov_register};

impl<ENV> GVm<ENV> {
    /// Set up the rest (&) arguments for a callable.
    pub(crate) fn setup_rest(
        &mut self,
        chunk: &Arc<Chunk>,
        first_reg: u16,
        num_args: u16,
    ) -> (usize, Value) {
        let rest_reg = first_reg + chunk.args + chunk.opt_args;
        let v = if num_args < (chunk.args + chunk.opt_args) {
            Value::Nil
        } else {
            let rest_len = (num_args - (chunk.args + chunk.opt_args)) as usize + 1;
            let mut r = vec![Value::Undefined; rest_len];
            r.copy_from_slice(
                &self.register_slice()[rest_reg as usize..(rest_reg as usize + rest_len)],
            );
            self.alloc_list_ro(r)
        };
        (rest_reg.into(), v)
    }

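    /// Check the call frame at `stack_top` on the live stack against the continuation `k`
    /// (frames are compared by id).  If the frame is NOT part of the continuation's saved
    /// call chain, return its parent stack top and its defers (they still need to run);
    /// return None when the frame is shared or there is no call frame at that position.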
    fn k_unshared_stack(&self, stack_top: usize, k: &Continuation) -> Option<(usize, &Vec<Value>)> {
        if !k.stack.is_empty() {
            if k.frame.stack_top >= stack_top {
                if let Value::CallFrame(h) = self.stack(stack_top) {
                    let frame = self.heap().get_callframe(h);
                    if let Value::CallFrame(k_h) = k.stack[stack_top] {
                        let k_frame = self.heap().get_callframe(k_h);
                        if frame.id != k_frame.id {
                            return Some((frame.stack_top, &frame.defers));
                        }
                    } else {
                        return Some((frame.stack_top, &frame.defers));
                    }
                }
            } else if let Value::CallFrame(h) = self.stack(stack_top) {
                let frame = self.heap().get_callframe(h);
                // If the continuation and frame have the same id then don't use the frame defers
                // (will lead to double deferring in some cases without this).
                return if k.frame.id == frame.id {
                    None
                } else {
                    Some((frame.stack_top, &frame.defers))
                };
            }
        }
        None
    }

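    /// Find pending defers that a continuation call would otherwise skip.  Starting from the
    /// current stack top (or the saved continuation stack top), walk call frames that are not
    /// shared with the continuation `k` and return the stack position of the first frame with
    /// non-empty defers.  Returns None when there is nothing to defer (or defers are already
    /// queued on the VM).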
    fn k_defers(&self, k: &Continuation) -> Option<usize> {
        if !self.defers.is_empty() {
            return None;
        }
        let mut stack_top = if let Some(k_stack_top) = self.k_stack_top {
            k_stack_top
        } else {
            self.stack_top
        };
        while let Some((next_stack_top, defers)) = self.k_unshared_stack(stack_top, k) {
            if stack_top == next_stack_top {
                break;
            }
            if !defers.is_empty() {
                return Some(stack_top);
            }
            stack_top = next_stack_top;
        }
        None
    }

    /// Build a call frame to be placed on the stack before transferring to a new chunk.
    pub(crate) fn make_call_frame(
        &mut self,
        chunk: Arc<Chunk>,
        called: Value,
        with_defers: bool,
    ) -> CallFrame {
        let defers = if with_defers {
            std::mem::take(&mut self.defers)
        } else {
            Vec::new()
        };
        let frame = CallFrame {
            id: self.callframe_id,
            chunk,
            ip: self.ip_ptr,
            current_ip: self.current_ip_ptr,
            stack_top: self.stack_top,
            this_fn: self.this_fn,
            defers,
            on_error: self.on_error,
            called,
        };
        self.callframe_id += 1;
        frame
    }

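    /// Store the result of a "special" call (builtin, map, vector or list call) in its result
    /// register.  For a tail call, first restore the previous call frame so the SRET that
    /// follows returns from the correct chunk.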
    fn finish_special_call(
        &mut self,
        chunk: Arc<Chunk>,
        tail_call: bool,
        first_reg: u16,
        res: Value,
    ) -> Arc<Chunk> {
        let res_reg = self.stack_top + first_reg as usize;
        if tail_call {
            // Go to last call frame so SRET does not mess up the return of a builtin or other special call.
            if let Some(frame) = self.call_frame() {
                let stack_top = frame.stack_top;
                let ip_ptr = frame.ip;
                let current_ip = frame.current_ip;
                let this_fn = frame.this_fn;
                let on_error = frame.on_error;
                let new_chunk = frame.chunk.clone();
                self.copy_frame_defers(); // Do this BEFORE we change stack_top...
                self.stack_top = stack_top;
                self.stack_max = self.stack_top + new_chunk.input_regs + new_chunk.extra_regs;
                self.ip_ptr = ip_ptr;
                self.current_ip_ptr = current_ip;
                self.this_fn = this_fn;
                self.on_error = on_error;
                *self.stack_mut(res_reg) = res;
                new_chunk
            } else {
                *self.stack_mut(res_reg) = res;
                chunk
            }
        } else {
            *self.stack_mut(res_reg) = res;
            chunk
        }
    }

    /// Main function to match and execute anything that is callable.
    pub fn make_call(
        &mut self,
        lambda: Value,
        chunk: Arc<Chunk>,
        first_reg: u16,
        num_args: u16,
        tail_call: bool,
    ) -> Result<Arc<Chunk>, (VMError, Arc<Chunk>)> {
        let mut do_cont = false;
        let result = match lambda {
            Value::Builtin(f_idx) => {
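                // A builtin receives its arguments as a register slice: the args live in the
                // registers directly after first_reg (first_reg itself receives the call frame
                // on error and the result on success).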
                let last_reg = (first_reg + num_args + 1) as usize;
                let f = &self.builtins[f_idx as usize];
                let regs = self.register_slice();

                let res =
                    (f.func)(self, &regs[(first_reg + 1) as usize..last_reg]).map_err(|e| {
                        if self.err_frame().is_some() {
                            // We should be OK making the frame here only when needed.  If a builtin
                            // calls bytecode it should be using do_call() which will restore state.
                            let frame = self.make_call_frame(chunk.clone(), lambda, false);
                            self.pause_gc(); // We might have allocated incoming params that are not rooted yet (see the CCC opcode for instance).
                            let call_frame = self.alloc_callframe(frame);
                            self.unpause_gc();
                            mov_register!(self, first_reg as usize, call_frame);
                            self.stack_top += first_reg as usize;
                        }
                        (e, chunk.clone())
                    })?;
                Ok(self.finish_special_call(chunk, tail_call, first_reg, res))
            }
            Value::Lambda(handle) => {
                let l = self.heap().get_lambda(handle);
                check_num_args(&l, num_args).map_err(|e| (e, chunk.clone()))?;
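                // For a lambda with a rest (&) parameter, collect the trailing arguments into a
                // list (or Nil) and store it in the rest register.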
                if l.rest {
                    let (rest_reg, h) = self.setup_rest(&l, first_reg, num_args);
                    *self.stack_mut(self.stack_top + rest_reg) = h;
                }
                if !tail_call {
                    let frame = self.make_call_frame(chunk, lambda, true);
                    self.pause_gc(); // We might have allocated incoming params that are not rooted yet (see the CCC opcode for instance).
                    let aframe = self.alloc_callframe(frame);
                    self.unpause_gc();
                    mov_register!(self, first_reg as usize, aframe);
                    self.stack_top += first_reg as usize;
                }
                self.stack_max = self.stack_top + l.input_regs + l.extra_regs;
                self.this_fn = Some(lambda);
                self.ip_ptr = get_code!(l);
                // XXX TODO- maybe test for stack overflow vs waiting for a panic.
                self.clear_opts(&l, first_reg, num_args);
                Ok(l)
            }
            Value::Closure(handle) => {
                let stack_top = self.stack_top;
                let (l, _) = self.heap().get_closure(handle);
                check_num_args(&l, num_args).map_err(|e| (e, chunk.clone()))?;
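                // Captured values are placed in the registers immediately following the declared
                // (and rest) arguments.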
                let cap_first = if l.rest {
                    let (rest_reg, h) = self.setup_rest(&l, first_reg, num_args);
                    *self.stack_mut(self.stack_top + rest_reg) = h;
                    rest_reg + 1
                } else {
                    (first_reg + l.args + l.opt_args + 1) as usize
                };

                // Take the heap so we can mutate self.  Put it back when done or the next access will panic.
                let heap = self.heap.take().expect("VM must have a Heap!");
                let caps = heap.get_closure_captures(handle);
                for (i, c) in caps.iter().enumerate() {
                    *self.stack_mut(self.stack_top + cap_first + i) = Value::Value(*c);
                }
                // Put the heap back; if this doesn't happen we will panic on the next access attempt.
                self.heap = Some(heap);

                let frame = if !tail_call {
                    let frame = self.make_call_frame(chunk, lambda, true);
                    self.stack_top += first_reg as usize;
                    Some(frame)
                } else {
                    assert_eq!(first_reg, 0);
                    None
                };
                self.stack_max = self.stack_top + l.input_regs + l.extra_regs;
                self.this_fn = Some(lambda);
                self.ip_ptr = get_code!(l);
                if let Some(frame) = frame {
                    self.pause_gc(); // We might have allocated incoming params that are not rooted yet (see the CCC opcode for instance).
                    let aframe = self.alloc_callframe(frame);
                    self.unpause_gc();
                    *self.stack_mut(stack_top + first_reg as usize) = aframe;
                }
                self.clear_opts(&l, first_reg, num_args);
                Ok(l)
            }
            Value::Continuation(handle) => {
                let k = self.heap().get_continuation(handle);
                if num_args != 1 {
                    return Err((VMError::new_vm("Continuation takes one argument."), chunk));
                }
                let from = self.k_defers(k);
                if let Some(from) = from {
                    let frame = self.call_frame_idx(from).expect("Invalid frame index!");
                    // Need to break the call frame lifetime from self to avoid extra work (allocations).
                    // This should be safe because the stack and heap are not touched so the reference is
                    // stable.  The unwrap() is OK because the frame cannot be null.
                    let frame: &CallFrame =
                        unsafe { (frame as *const CallFrame).as_ref().unwrap() };
                    self.defers.resize(frame.defers.len(), Value::Undefined);
                    self.defers.copy_from_slice(&frame.defers[..]);
                    self.k_stack_top = Some(frame.stack_top);
                }
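                // Run pending defers one at a time: reset the ip so this continuation call is
                // re-executed after each defer returns; only when the defers are exhausted do we
                // fall through and actually restore the continuation (do_cont below).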
                if let Some(defer) = self.defers.pop() {
                    let first_reg = (chunk.input_regs + chunk.extra_regs + 1) as u16;
                    self.ip_ptr = self.current_ip_ptr;
                    self.make_call(defer, chunk, first_reg, 0, false)
                } else {
                    self.k_stack_top = None;
                    do_cont = true;
                    Ok(chunk)
                }
            }
            Value::Map(handle) => {
                let res = self
                    .call_map(handle, first_reg, num_args)
                    .map_err(|e| (e, chunk.clone()))?;
                Ok(self.finish_special_call(chunk, tail_call, first_reg, res))
            }
            Value::Vector(handle) => {
                let res = self
                    .call_vector(handle, first_reg, num_args)
                    .map_err(|e| (e, chunk.clone()))?;
                Ok(self.finish_special_call(chunk, tail_call, first_reg, res))
            }
            Value::Pair(_) | Value::List(_, _) => {
                let res = self
                    .call_list(lambda, first_reg, num_args)
                    .map_err(|e| (e, chunk.clone()))?;
                Ok(self.finish_special_call(chunk, tail_call, first_reg, res))
            }
            Value::Value(handle) => {
                // Need to deref.
                self.make_call(
                    self.get_value(handle),
                    chunk,
                    first_reg,
                    num_args,
                    tail_call,
                )
            }
            _ => Err((
                VMError::new_vm(format!("CALL: Not a callable {lambda:?}.")),
                chunk,
            )),
        };
        if do_cont {
            // Had to break this out for continuations. Handling defers makes this necessary.
            match lambda {
                Value::Continuation(h) => {
                    // Take the heap so we can mutate self.  Put it back when done or the next access will panic.
                    let heap = self.heap.take().expect("VM must have a Heap!");
                    let k = heap.get_continuation(h);
                    let arg = self.register(first_reg as usize + 1);

                    self.defers.resize(k.frame.defers.len(), Value::Undefined);
                    self.defers.copy_from_slice(&k.frame.defers[..]);

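                    // Restore the continuation's saved stack, drop the single argument into its
                    // argument register, and restore the frame state (ip, this_fn, on_error) so
                    // execution resumes where the continuation was captured.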
                    self.stack_slice_mut()[..k.stack.len()].copy_from_slice(&k.stack[..]);
                    *self.stack_mut(k.arg_reg) = arg;
                    self.stack_top = k.frame.stack_top;
                    self.stack_max =
                        self.stack_top + k.frame.chunk.input_regs + k.frame.chunk.extra_regs;
                    self.ip_ptr = k.frame.ip;
                    self.current_ip_ptr = k.frame.current_ip;
                    self.this_fn = k.frame.this_fn;
                    self.on_error = k.frame.on_error;
                    let chunk = k.frame.chunk.clone();
                    // Put the heap back; if this doesn't happen we will panic on the next access attempt.
                    self.heap = Some(heap);
                    Ok(chunk)
                }
                _ => panic!("Must be a continuation!"),
            }
        } else {
            result
        }
    }

    /// Clear out the unused optional registers.
    /// Also clears the extra working registers to avoid writing to globals or closures by accident.
    fn clear_opts(&mut self, l: &Chunk, first_reg: u16, num_args: u16) {
        // First clear any optional arguments.
        let num_args = if l.rest && num_args == 0 {
            // Always have at least 1 arg if we have a rest argument.
            1
        } else {
            num_args
        };
        let end_arg = if l.rest {
            // Do not clear the rest arg.
            l.args + l.opt_args - 1
        } else {
            l.args + l.opt_args
        };
        if num_args < end_arg {
            for r in num_args..end_arg {
                mov_register!(
                    self,
                    first_reg as usize + (r + 1) as usize,
                    Value::Undefined
                );
            }
        }
        // Clear extra regs so things like closures or globals don't get changed by mistake.
        if l.extra_regs > 0 {
            for r in l.input_regs..=l.input_regs + l.extra_regs {
                mov_register!(self, first_reg as usize + r, Value::Undefined);
            }
        }
    }
}

/// Verify the number of args provided will work with a chunk.
fn check_num_args(l: &Chunk, num_args: u16) -> VMResult<()> {
    if l.rest {
        if num_args < (l.args - 1) {
            return Err(VMError::new_vm(format!(
                "Too few arguments, expected at least {} got {}.",
                l.args - 1,
                num_args
            )));
        }
    } else {
        if num_args < l.args {
            return Err(VMError::new_vm(format!(
                "Too few arguments, expected at least {} got {}.",
                l.args, num_args
            )));
        }
        if num_args > (l.args + l.opt_args) {
            return Err(VMError::new_vm(format!(
                "Too many arguments, expected no more than {} got {}.",
                (l.args + l.opt_args),
                num_args
            )));
        }
    }
    Ok(())
}