Fix 'return' handling with 'yield' in 'for of' or with finally blocks

Ref: 4bb8c35da7
Saúl Ibarra Corretgé 2023-12-14 10:54:12 +01:00
parent 39901e2b86
commit e5812862f9
3 changed files with 141 additions and 76 deletions
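
For context, here is a minimal JavaScript sketch of the kind of pattern this change targets (illustrative only, not part of the commit; the names gen/items are made up): a 'return' that must close a 'for of' iterator and still run a 'finally' block, while a 'yield' in the returned expression leaves the operand stack depth unknown at compile time.

function* gen(items) {
    for (const item of items) {
        try {
            /* 'return' has to close the array iterator and run 'finally',
               even though 'yield' suspends with a partial result (10)
               still on the operand stack */
            return 10 + (yield item);
        } finally {
            /* runs before the generator completes */
        }
    }
}

const g = gen([1, 2, 3]);
console.log(g.next());   // { value: 1, done: false }
console.log(g.next(5));  // { value: 15, done: true }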


@@ -183,6 +183,7 @@ DEF( goto, 5, 0, 0, label) /* must come after if_true */
DEF( catch, 5, 0, 1, label)
DEF( gosub, 5, 0, 0, label) /* used to execute the finally block */
DEF( ret, 1, 1, 0, none) /* used to return from the finally block */
DEF( nip_catch, 1, 2, 1, none) /* catch ... a -> a */
DEF( to_object, 1, 1, 1, none)
//DEF( to_string, 1, 1, 1, none)
@@ -209,7 +210,6 @@ DEF( for_of_next, 2, 3, 5, u8)
DEF(iterator_check_object, 1, 1, 1, none)
DEF(iterator_get_value_done, 1, 1, 2, none)
DEF( iterator_close, 1, 3, 0, none)
DEF(iterator_close_return, 1, 4, 4, none)
DEF( iterator_next, 1, 4, 4, none)
DEF( iterator_call, 2, 4, 5, u8)
DEF( initial_yield, 1, 0, 0, none)

quickjs.c

@@ -70,7 +70,8 @@
8: dump stdlib functions
16: dump bytecode in hex
32: dump line number table
64: dump executed bytecode
64: dump compute_stack_size
128: dump executed bytecode
*/
//#define DUMP_BYTECODE (1)
/* dump the occurrence of the automatic GC */
@@ -14354,7 +14355,7 @@ static JSValue JS_CallInternal(JSContext *caller_ctx, JSValue func_obj,
size_t alloca_size;
JSInlineCache *ic;
#if defined(DUMP_BYTECODE) && (DUMP_BYTECODE & 64)
#if defined(DUMP_BYTECODE) && (DUMP_BYTECODE & 128)
#define DUMP_BYTECODE_OR_DONT(pc) dump_single_byte_code(ctx, pc, b);
#else
#define DUMP_BYTECODE_OR_DONT(pc)
@@ -14463,7 +14464,7 @@ static JSValue JS_CallInternal(JSContext *caller_ctx, JSValue func_obj,
ctx = b->realm; /* set the current realm */
ic = b->ic;
#if defined(DUMP_BYTECODE) && (DUMP_BYTECODE & 64)
#if defined(DUMP_BYTECODE) && (DUMP_BYTECODE & 128)
print_func_name(b);
#endif
@@ -15599,26 +15600,21 @@ static JSValue JS_CallInternal(JSContext *caller_ctx, JSValue func_obj,
}
sp--;
BREAK;
CASE(OP_iterator_close_return):
CASE(OP_nip_catch):
{
JSValue ret_val;
/* iter_obj next catch_offset ... ret_val ->
ret_val iter_obj next catch_offset */
/* catch_offset ... ret_val -> ret_val */
ret_val = *--sp;
while (sp > stack_buf &&
JS_VALUE_GET_TAG(sp[-1]) != JS_TAG_CATCH_OFFSET) {
JS_FreeValue(ctx, *--sp);
}
if (unlikely(sp < stack_buf + 3)) {
JS_ThrowInternalError(ctx, "iterator_close_return");
if (unlikely(sp == stack_buf)) {
JS_ThrowInternalError(ctx, "nip_catch");
JS_FreeValue(ctx, ret_val);
goto exception;
}
sp[0] = sp[-1];
sp[-1] = sp[-2];
sp[-2] = sp[-3];
sp[-3] = ret_val;
sp++;
sp[-1] = ret_val;
}
BREAK;
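
As a rough model of the new opcode's run-time behaviour, here is a plain JavaScript sketch (not engine code; CATCH_OFFSET and nipCatch are made-up stand-ins for JS_TAG_CATCH_OFFSET and the interpreter case above): OP_nip_catch discards everything above the catch offset and replaces the offset itself with the return value.

const CATCH_OFFSET = Symbol("catch_offset");  /* stand-in for JS_TAG_CATCH_OFFSET */

function nipCatch(stack, retVal) {
    /* pop and discard entries until the catch offset is on top */
    while (stack.length > 0 && stack[stack.length - 1] !== CATCH_OFFSET)
        stack.pop();
    if (stack.length === 0)
        throw new Error("nip_catch: no catch offset on the stack");
    /* catch_offset ... ret_val -> ret_val */
    stack[stack.length - 1] = retVal;
    return stack;
}

console.log(nipCatch(["iter_obj", "next", CATCH_OFFSET, "tmp1", "tmp2"], "ret"));
/* -> [ 'iter_obj', 'next', 'ret' ] */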
@@ -23876,7 +23872,6 @@ static __exception int emit_break(JSParseState *s, JSAtom name, int is_cont)
static void emit_return(JSParseState *s, BOOL hasval)
{
BlockEnv *top;
int drop_count;
if (s->cur_func->func_kind != JS_FUNC_NORMAL) {
if (!hasval) {
@@ -23890,26 +23885,24 @@ static void emit_return(JSParseState *s, BOOL hasval)
}
}
drop_count = 0;
top = s->cur_func->top_break;
while (top != NULL) {
/* XXX: emit the appropriate OP_leave_scope opcodes? Probably not
required as all local variables will be closed upon returning
from JS_CallInternal, but not in the same order. */
if (top->has_iterator) {
/* with 'yield', the exact number of OP_drop to emit is
unknown, so we use a specific operation to look for
the catch offset */
if (top->has_iterator || top->label_finally != -1) {
if (!hasval) {
emit_op(s, OP_undefined);
hasval = TRUE;
}
emit_op(s, OP_iterator_close_return);
/* Remove the stack elements up to and including the catch
offset. When 'yield' is used in an expression we have
no easy way to count them, so we use this specific
instruction instead. */
emit_op(s, OP_nip_catch);
/* stack: iter_obj next ret_val */
if (top->has_iterator) {
if (s->cur_func->func_kind == JS_FUNC_ASYNC_GENERATOR) {
int label_next, label_next2;
emit_op(s, OP_drop); /* catch offset */
emit_op(s, OP_drop); /* next */
emit_op(s, OP_nip); /* next */
emit_op(s, OP_swap);
emit_op(s, OP_get_field2);
emit_atom(s, JS_ATOM_return);
emit_ic(s, JS_ATOM_return);
@@ -23927,24 +23920,15 @@ static void emit_return(JSParseState *s, BOOL hasval)
emit_label(s, label_next2);
emit_op(s, OP_drop);
} else {
emit_op(s, OP_rot3r);
emit_op(s, OP_undefined); /* dummy catch offset */
emit_op(s, OP_iterator_close);
}
drop_count = -3;
}
drop_count += top->drop_count;
if (top->label_finally != -1) {
while(drop_count) {
/* must keep the stack top if hasval */
emit_op(s, hasval ? OP_nip : OP_drop);
drop_count--;
}
if (!hasval) {
/* must push return value to keep same stack size */
emit_op(s, OP_undefined);
hasval = TRUE;
}
} else {
/* execute the "finally" block */
emit_goto(s, OP_gosub, top->label_finally);
}
}
top = top->prev;
}
if (s->cur_func->is_derived_class_constructor) {
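
To see why emit_return cannot simply count OP_drop/OP_nip instructions, consider this illustrative example (not from the commit; pairs/xs are made-up names): when the returned expression itself contains 'yield', partial results of that expression are still live on the operand stack at the suspension point, so the number of entries above the catch offset is only known at run time.

function* pairs(xs) {
    for (const x of xs) {
        /* 'x * 2' has already been pushed when the generator suspends at
           'yield', so the stack depth at the 'return' depends on the shape
           of the expression, not just on the enclosing for-of loop */
        return x * 2 + (yield x);
    }
}

const it = pairs([7]);
console.log(it.next());   // { value: 7, done: false }
console.log(it.next(1));  // { value: 15, done: true }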
@@ -30455,6 +30439,7 @@ typedef struct StackSizeState {
int bc_len;
int stack_len_max;
uint16_t *stack_level_tab;
int32_t *catch_pos_tab;
int *pc_stack;
int pc_stack_len;
int pc_stack_size;
@@ -30462,7 +30447,7 @@ typedef struct StackSizeState {
/* 'op' is only used for error indication */
static __exception int ss_check(JSContext *ctx, StackSizeState *s,
int pos, int op, int stack_len)
int pos, int op, int stack_len, int catch_pos)
{
if ((unsigned)pos >= s->bc_len) {
JS_ThrowInternalError(ctx, "bytecode buffer overflow (op=%d, pc=%d)", op, pos);
@@ -30481,6 +30466,10 @@ static __exception int ss_check(JSContext *ctx, StackSizeState *s,
JS_ThrowInternalError(ctx, "inconsistent stack size: %d %d (pc=%d)",
s->stack_level_tab[pos], stack_len, pos);
return -1;
} else if (s->catch_pos_tab[pos] != catch_pos) {
JS_ThrowInternalError(ctx, "inconsistent catch position: %d %d (pc=%d)",
s->catch_pos_tab[pos], catch_pos, pos);
return -1;
} else {
return 0;
}
@@ -30488,6 +30477,7 @@ static __exception int ss_check(JSContext *ctx, StackSizeState *s,
/* mark as explored and store the stack size */
s->stack_level_tab[pos] = stack_len;
s->catch_pos_tab[pos] = catch_pos;
/* queue the new PC to explore */
if (js_resize_array(ctx, (void **)&s->pc_stack, sizeof(s->pc_stack[0]),
@@ -30502,7 +30492,7 @@ static __exception int compute_stack_size(JSContext *ctx,
int *pstack_size)
{
StackSizeState s_s, *s = &s_s;
int i, diff, n_pop, pos_next, stack_len, pos, op;
int i, diff, n_pop, pos_next, stack_len, pos, op, catch_pos, catch_level;
const JSOpCode *oi;
const uint8_t *bc_buf;
@@ -30515,24 +30505,32 @@ static __exception int compute_stack_size(JSContext *ctx,
return -1;
for(i = 0; i < s->bc_len; i++)
s->stack_level_tab[i] = 0xffff;
s->stack_len_max = 0;
s->pc_stack = NULL;
s->catch_pos_tab = js_malloc(ctx, sizeof(s->catch_pos_tab[0]) * s->bc_len);
if (!s->catch_pos_tab)
goto fail;
s->stack_len_max = 0;
s->pc_stack_len = 0;
s->pc_stack_size = 0;
/* breadth-first graph exploration */
if (ss_check(ctx, s, 0, OP_invalid, 0))
if (ss_check(ctx, s, 0, OP_invalid, 0, -1))
goto fail;
while (s->pc_stack_len > 0) {
pos = s->pc_stack[--s->pc_stack_len];
stack_len = s->stack_level_tab[pos];
catch_pos = s->catch_pos_tab[pos];
op = bc_buf[pos];
if (op == 0 || op >= OP_COUNT) {
JS_ThrowInternalError(ctx, "invalid opcode (op=%d, pc=%d)", op, pos);
goto fail;
}
oi = &short_opcode_info(op);
#if defined(DUMP_BYTECODE) && (DUMP_BYTECODE & 64)
printf("%5d: %10s %5d %5d\n", pos, oi->name, stack_len, catch_pos);
#endif
pos_next = pos + oi->size;
if (pos_next > s->bc_len) {
JS_ThrowInternalError(ctx, "bytecode buffer overflow (op=%d, pc=%d)", op, pos);
@@ -30583,54 +30581,103 @@ static __exception int compute_stack_size(JSContext *ctx,
case OP_if_true8:
case OP_if_false8:
diff = (int8_t)bc_buf[pos + 1];
if (ss_check(ctx, s, pos + 1 + diff, op, stack_len))
if (ss_check(ctx, s, pos + 1 + diff, op, stack_len, catch_pos))
goto fail;
break;
case OP_if_true:
case OP_if_false:
case OP_catch:
diff = get_u32(bc_buf + pos + 1);
if (ss_check(ctx, s, pos + 1 + diff, op, stack_len))
if (ss_check(ctx, s, pos + 1 + diff, op, stack_len, catch_pos))
goto fail;
break;
case OP_gosub:
diff = get_u32(bc_buf + pos + 1);
if (ss_check(ctx, s, pos + 1 + diff, op, stack_len + 1))
if (ss_check(ctx, s, pos + 1 + diff, op, stack_len + 1, catch_pos))
goto fail;
break;
case OP_with_get_var:
case OP_with_delete_var:
diff = get_u32(bc_buf + pos + 5);
if (ss_check(ctx, s, pos + 5 + diff, op, stack_len + 1))
if (ss_check(ctx, s, pos + 5 + diff, op, stack_len + 1, catch_pos))
goto fail;
break;
case OP_with_make_ref:
case OP_with_get_ref:
case OP_with_get_ref_undef:
diff = get_u32(bc_buf + pos + 5);
if (ss_check(ctx, s, pos + 5 + diff, op, stack_len + 2))
if (ss_check(ctx, s, pos + 5 + diff, op, stack_len + 2, catch_pos))
goto fail;
break;
case OP_with_put_var:
diff = get_u32(bc_buf + pos + 5);
if (ss_check(ctx, s, pos + 5 + diff, op, stack_len - 1))
if (ss_check(ctx, s, pos + 5 + diff, op, stack_len - 1, catch_pos))
goto fail;
break;
case OP_catch:
diff = get_u32(bc_buf + pos + 1);
if (ss_check(ctx, s, pos + 1 + diff, op, stack_len, catch_pos))
goto fail;
catch_pos = pos;
break;
case OP_for_of_start:
case OP_for_await_of_start:
catch_pos = pos;
break;
/* we assume the catch offset entry is only removed with
some op codes */
case OP_drop:
catch_level = stack_len;
goto check_catch;
case OP_nip:
catch_level = stack_len - 1;
goto check_catch;
case OP_nip1:
catch_level = stack_len - 1;
goto check_catch;
case OP_iterator_close:
catch_level = stack_len + 2;
check_catch:
/* Note: for for_of_start/for_await_of_start we consider that
the catch offset is on the first stack entry instead of
the third */
if (catch_pos >= 0) {
int level;
level = s->stack_level_tab[catch_pos];
if (bc_buf[catch_pos] != OP_catch)
level++; /* for_of_start, for_await_of_start */
/* catch_level = stack_level before op_catch is executed ? */
if (catch_level == level) {
catch_pos = s->catch_pos_tab[catch_pos];
}
}
break;
case OP_nip_catch:
if (catch_pos < 0) {
JS_ThrowInternalError(ctx, "nip_catch: no catch op (pc=%d)", pos);
goto fail;
}
stack_len = s->stack_level_tab[catch_pos];
if (bc_buf[catch_pos] != OP_catch)
stack_len++; /* for_of_start, for_await_of_start */
stack_len++; /* no stack overflow is possible by construction */
catch_pos = s->catch_pos_tab[catch_pos];
break;
default:
break;
}
if (ss_check(ctx, s, pos_next, op, stack_len))
if (ss_check(ctx, s, pos_next, op, stack_len, catch_pos))
goto fail;
done_insn: ;
}
js_free(ctx, s->stack_level_tab);
js_free(ctx, s->pc_stack);
js_free(ctx, s->catch_pos_tab);
js_free(ctx, s->stack_level_tab);
*pstack_size = s->stack_len_max;
return 0;
fail:
js_free(ctx, s->stack_level_tab);
js_free(ctx, s->pc_stack);
js_free(ctx, s->catch_pos_tab);
js_free(ctx, s->stack_level_tab);
*pstack_size = 0;
return -1;
}
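
To illustrate what the new catch_pos_tab chain corresponds to at the source level, here is a hedged JavaScript example (not taken from the commit; nested/rows are made-up names): every for-of loop and every try/finally protects a region with its own catch offset, and when such regions nest, compute_stack_size now records the enclosing catch position for each bytecode position so it can verify where a 'return' unwinds to.

function* nested(rows) {
    for (const row of rows) {          /* outer catch offset (for_of_start) */
        try {                          /* catch offset for the finally handler */
            for (const cell of row) {  /* inner catch offset (for_of_start) */
                return yield cell;     /* unwinds both iterators, runs finally */
            }
        } finally {
            /* executed before the generator completes */
        }
    }
}

const n = nested([[42]]);
console.log(n.next());       // { value: 42, done: false }
console.log(n.next("end"));  // { value: "end", done: true }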


@@ -722,6 +722,18 @@ function test_generator()
assert(ret, "ret_val");
return 3;
}
function *f3() {
var ret;
/* test stack consistency with nip_catch to handle yield return +
 * finally clause */
try {
ret = 2 + (yield 1);
} catch(e) {
} finally {
ret++;
}
return ret;
}
var g, v;
g = f();
v = g.next();
@@ -742,6 +754,12 @@ function test_generator()
assert(v.value === 3 && v.done === true);
v = g.next();
assert(v.value === undefined && v.done === true);
g = f3();
v = g.next();
assert(v.value === 1 && v.done === false);
v = g.next(3);
assert(v.value === 6 && v.done === true);
}
/* CVE-2023-31922 */