Skip to content

Commit 7fb58b0

Browse files
committed
Fix refleaks
1 parent f1a5090 commit 7fb58b0

File tree

5 files changed

+32
-9
lines changed

Parser/pegen.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -208,7 +208,7 @@ int
208208
_PyPegen_fill_token(Parser *p)
209209
{
210210
struct token new_token;
211-
new_token.metadata = NULL;
211+
_PyToken_Init(&new_token);
212212
int type = _PyTokenizer_Get(p->tok, &new_token);
213213

214214
// Record and skip '# type: ignore' comments
@@ -251,7 +251,7 @@ _PyPegen_fill_token(Parser *p)
251251
Token *t = p->tokens[p->fill];
252252
return initialize_token(p, t, &new_token, type);
253253
error:
254-
Py_XDECREF(new_token.metadata);
254+
_PyToken_Free(&new_token);
255255
return -1;
256256
}
257257

Parser/pegen_errors.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -165,7 +165,7 @@ _PyPegen_tokenize_full_source_to_check_for_errors(Parser *p) {
165165

166166
int ret = 0;
167167
struct token new_token;
168-
new_token.metadata = NULL;
168+
_PyToken_Init(&new_token);
169169

170170
for (;;) {
171171
switch (_PyTokenizer_Get(p->tok, &new_token)) {
@@ -193,7 +193,7 @@ _PyPegen_tokenize_full_source_to_check_for_errors(Parser *p) {
193193

194194

195195
exit:
196-
Py_XDECREF(new_token.metadata);
196+
_PyToken_Free(&new_token);
197197
// If we're in an f-string, we want the syntax error in the expression part
198198
// to propagate, so that tokenizer errors (like expecting '}') that happen afterwards
199199
// do not swallow it.

Parser/tokenizer.c

+15
Original file line numberDiff line numberDiff line change
@@ -982,6 +982,16 @@ _PyTokenizer_Free(struct tok_state *tok)
982982
PyMem_Free(tok);
983983
}
984984

985+
void
986+
_PyToken_Free(struct token *token) {
987+
Py_XDECREF(token->metadata);
988+
}
989+
990+
void
991+
_PyToken_Init(struct token *token) {
992+
token->metadata = NULL;
993+
}
994+
985995
static int
986996
tok_readline_raw(struct tok_state *tok)
987997
{
@@ -1973,6 +1983,7 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t
19731983

19741984
struct tok_state ahead_tok;
19751985
struct token ahead_token;
1986+
_PyToken_Init(&ahead_token);
19761987
int ahead_tok_kind;
19771988

19781989
memcpy(&ahead_tok, tok, sizeof(ahead_tok));
@@ -1988,8 +1999,10 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t
19881999
returning a plain NAME token, return ASYNC. */
19892000
tok->async_def_indent = tok->indent;
19902001
tok->async_def = 1;
2002+
_PyToken_Free(&ahead_token);
19912003
return MAKE_TOKEN(ASYNC);
19922004
}
2005+
_PyToken_Free(&ahead_token);
19932006
}
19942007
}
19952008

@@ -2823,7 +2836,9 @@ _PyTokenizer_FindEncodingFilename(int fd, PyObject *filename)
28232836
// if fetching the encoding shows a warning.
28242837
tok->report_warnings = 0;
28252838
while (tok->lineno < 2 && tok->done == E_OK) {
2839+
_PyToken_Init(&token);
28262840
_PyTokenizer_Get(tok, &token);
2841+
_PyToken_Free(&token);
28272842
}
28282843
fclose(fp);
28292844
if (tok->encoding) {

Parser/tokenizer.h

+2
Original file line numberDiff line numberDiff line change
@@ -139,6 +139,8 @@ extern struct tok_state *_PyTokenizer_FromUTF8(const char *, int);
139139
extern struct tok_state *_PyTokenizer_FromFile(FILE *, const char*,
140140
const char *, const char *);
141141
extern void _PyTokenizer_Free(struct tok_state *);
142+
extern void _PyToken_Free(struct token *);
143+
extern void _PyToken_Init(struct token *);
142144
extern int _PyTokenizer_Get(struct tok_state *, struct token *);
143145

144146
#define tok_dump _Py_tok_dump

Python/Python-tokenize.c

+11-5
Original file line numberDiff line numberDiff line change
@@ -162,18 +162,21 @@ _tokenizer_error(struct tok_state *tok)
162162
static PyObject *
163163
tokenizeriter_next(tokenizeriterobject *it)
164164
{
165+
PyObject* result = NULL;
165166
struct token token;
167+
_PyToken_Init(&token);
168+
166169
int type = _PyTokenizer_Get(it->tok, &token);
167170
if (type == ERRORTOKEN) {
168171
if(!PyErr_Occurred()) {
169172
_tokenizer_error(it->tok);
170173
assert(PyErr_Occurred());
171174
}
172-
return NULL;
175+
goto exit;
173176
}
174177
if (type == ERRORTOKEN || type == ENDMARKER) {
175178
PyErr_SetString(PyExc_StopIteration, "EOF");
176-
return NULL;
179+
goto exit;
177180
}
178181
PyObject *str = NULL;
179182
if (token.start == NULL || token.end == NULL) {
@@ -183,14 +186,14 @@ tokenizeriter_next(tokenizeriterobject *it)
183186
str = PyUnicode_FromStringAndSize(token.start, token.end - token.start);
184187
}
185188
if (str == NULL) {
186-
return NULL;
189+
goto exit;
187190
}
188191

189192
Py_ssize_t size = it->tok->inp - it->tok->buf;
190193
PyObject *line = PyUnicode_DecodeUTF8(it->tok->buf, size, "replace");
191194
if (line == NULL) {
192195
Py_DECREF(str);
193-
return NULL;
196+
goto exit;
194197
}
195198
const char *line_start = ISSTRINGLIT(type) ? it->tok->multi_line_start : it->tok->line_start;
196199
Py_ssize_t lineno = ISSTRINGLIT(type) ? it->tok->first_lineno : it->tok->lineno;
@@ -204,7 +207,10 @@ tokenizeriter_next(tokenizeriterobject *it)
204207
end_col_offset = _PyPegen_byte_offset_to_character_offset(line, token.end - it->tok->line_start);
205208
}
206209

207-
return Py_BuildValue("(NinnnnN)", str, type, lineno, end_lineno, col_offset, end_col_offset, line);
210+
result = Py_BuildValue("(NinnnnN)", str, type, lineno, end_lineno, col_offset, end_col_offset, line);
211+
exit:
212+
_PyToken_Free(&token);
213+
return result;
208214
}
209215

210216
static void

0 commit comments

Comments (0)