From e072f1e33504c7594e7fbd42b8de0d873ba2cd1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fatmanur=20=C3=87etinta=C5=9F?= <99668549+fatmanur7@users.noreply.github.com> Date: Fri, 29 Mar 2024 16:50:17 +0300 Subject: [PATCH] style: edit the norminette --- src/lexer/lexer.c | 87 +++++++++++++++++++++-------------------- src/lexer/lexer_utils.c | 34 ++++++++-------- src/lexer/unquote.c | 36 +++++++++-------- 3 files changed, 82 insertions(+), 75 deletions(-) diff --git a/src/lexer/lexer.c b/src/lexer/lexer.c index 2538ebb..fb7f980 100644 --- a/src/lexer/lexer.c +++ b/src/lexer/lexer.c @@ -6,7 +6,7 @@ /* By: facetint +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2024/03/03 13:17:52 by facetint #+# #+# */ -/* Updated: 2024/03/28 16:58:20 by facetint ### ########.fr */ +/* Updated: 2024/03/29 16:44:08 by facetint ### ########.fr */ /* */ /* ************************************************************************** */ @@ -16,7 +16,7 @@ #include "../../includes/utils.h" #include "../../includes/char_classification.h" -t_token_type get_meta_token_type(const char *input) +t_token_type get_meta_token_type(const char *input) { if (!input || !*input) return (UNKNOWN); @@ -37,90 +37,93 @@ t_token_type get_meta_token_type(const char *input) return (UNKNOWN); } -lexer_state word_state(t_token **lexer_data, char *input, int *const index) +lexer_state word_state(t_token **lexer_data, char *input, int *const i) { - t_token token; - int start_index; + t_token token; + int start_i; - start_index = *index; - if (input[*index] == DOUBLE_QUOTE) + start_i = *i; + if (input[*i] == DOUBLE_QUOTE) { token.type = DOUBLE_QUOTED_WORD; - *index += find_char(&input[*index + 1], DOUBLE_QUOTE) + 1; + *i += find_char(&input[*i + 1], DOUBLE_QUOTE) + 1; } - else if (input[*index] == SINGLE_QUOTE) + else if (input[*i] == SINGLE_QUOTE) { token.type = SINGLE_QUOTED_WORD; - *index += find_char(&input[*index + 1], SINGLE_QUOTE) + 1; + *i += find_char(&input[*i + 1], SINGLE_QUOTE) + 1; } else { token.type = UNQUOTED_WORD; - while (input[*index] && (is_unquoted_word_char(input[*index]) || is_escaped(input, *index))) - (*index)++; - (*index)--; + while (input[*i] && (is_unquoted_word_char(input[*i]) + || is_escaped(input, *i))) + (*i)++; + (*i)--; } - token.value = ft_substr(input, start_index, *index - start_index + 1); + token.value = ft_substr(input, start_i, *i - start_i + 1); if (ft_strlen(token.value) > 0) lexer_data_append(lexer_data, lexer_data_new(token)); return ((lexer_state) delimiter_state); } -lexer_state operator_state_l(t_token **lexer_data, char *input, int *const index) +lexer_state operator_state_l(t_token **lexer_data, char *input, int *const i) { - int length; - t_token token; + int length; + t_token token; - token.type = get_meta_token_type(&input[*index]); - if (token.type == PIPE || token.type == OUTPUT_REDIRECTION || token.type == INPUT_REDIRECTION) + token.type = get_meta_token_type(&input[*i]); + if (token.type == PIPE || token.type == OUTPUT_REDIRECTION + || token.type == INPUT_REDIRECTION) length = 1; - else if (token.type == APPEND_REDIRECTION || token.type == HEREDOC_REDIRECTION) + else if (token.type == APPEND_REDIRECTION + || token.type == HEREDOC_REDIRECTION) length = 2; else - return NULL; // do not append new token - *index = *index + length - 1; // to mark where we stopped scanning + return (NULL); + *i = *i + length - 1; token.value = NULL; lexer_data_append(lexer_data, lexer_data_new(token)); return ((lexer_state) delimiter_state); } -lexer_state delimiter_state(t_token **lexer_data, char *input, 
int *const index) +lexer_state delimiter_state(t_token **lexer_data, char *input, int *const i) { - t_token token; - int skipped_spaces; - char next_non_whitespace; + t_token token; + int skipped_spaces; + char next_non_whitespace; - skipped_spaces = skip_white_spaces(&input[*index]); - next_non_whitespace = input[*index + skipped_spaces]; + skipped_spaces = skip_white_spaces(&input[*i]); + next_non_whitespace = input[*i + skipped_spaces]; if (next_non_whitespace == '\0') - return NULL; + return (NULL); if (skipped_spaces != 0) { - *index = *index + skipped_spaces - 1; // to mark where we stopped scanning + *i = *i + skipped_spaces - 1; token.value = NULL; token.type = DELIMITER; lexer_data_append(lexer_data, lexer_data_new(token)); } else - { - *index = *index - 1; - } + *i = *i - 1; if (is_meta_char(next_non_whitespace)) - return (lexer_state) operator_state_l; + return ((lexer_state) operator_state_l); return ((lexer_state) word_state); } -t_token *lex(char *input) +t_token *lex(char *input) { - t_token *token = NULL; - lexer_state next_state = (lexer_state) word_state; - int index; + int i; + t_token *token; + lexer_state next_state; - index = skip_white_spaces(input); - while (next_state && input[index]) + next_state = (lexer_state) word_state; + token = NULL; + i = skip_white_spaces(input); + while (next_state && input[i]) { - next_state = (lexer_state) next_state(&token, input, &index); - index++; + next_state = (lexer_state) next_state(&token, input, &i); + i++; } return (token); } diff --git a/src/lexer/lexer_utils.c b/src/lexer/lexer_utils.c index a4771d1..b2f5054 100644 --- a/src/lexer/lexer_utils.c +++ b/src/lexer/lexer_utils.c @@ -6,7 +6,7 @@ /* By: facetint +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2024/03/03 13:17:50 by facetint #+# #+# */ -/* Updated: 2024/03/14 16:07:31 by facetint ### ########.fr */ +/* Updated: 2024/03/29 16:46:05 by facetint ### ########.fr */ /* */ /* ************************************************************************** */ @@ -14,43 +14,44 @@ #include "../../memory-allocator/allocator.h" #include -t_token *get_last_lexer_data(t_token *token) +t_token *get_last_lexer_data(t_token *token) { if (!token) - return NULL; + return (NULL); while (token->next) token = token->next; - return token; + return (token); } -t_token *lexer_data_new(t_token token) +t_token *lexer_data_new(t_token token) { - t_token *data; + t_token *data; data = safe_malloc(sizeof(t_token)); *data = token; data->next = NULL; - return data; + return (data); } -void lexer_data_insert(t_token *data, t_token *new_list) { - t_token *last; +void lexer_data_insert(t_token *data, t_token *new_list) +{ + t_token *last; if (!data || !new_list) - return; + return ; last = get_last_lexer_data(new_list); last->next = data->next; data->next = new_list; } -void lexer_data_append(t_token **data, t_token *new_data) +void lexer_data_append(t_token **data, t_token *new_data) { - t_token *cur_data; + t_token *cur_data; if (*data == NULL) { *data = new_data; - return; + return ; } cur_data = *data; while (cur_data->next != NULL) @@ -58,8 +59,9 @@ void lexer_data_append(t_token **data, t_token *new_data) cur_data->next = new_data; } -void uninit_tokens(t_token *lexical_data) { - t_token *next; +void uninit_tokens(t_token *lexical_data) +{ + t_token *next; while (lexical_data) { @@ -69,4 +71,4 @@ void uninit_tokens(t_token *lexical_data) { safe_free(lexical_data); lexical_data = next; } -} \ No newline at end of file +} diff --git a/src/lexer/unquote.c b/src/lexer/unquote.c index 7e7eb54..83dd205 
100644 --- a/src/lexer/unquote.c +++ b/src/lexer/unquote.c @@ -6,7 +6,7 @@ /* By: facetint +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2024/03/03 13:18:41 by facetint #+# #+# */ -/* Updated: 2024/03/28 16:57:58 by facetint ### ########.fr */ +/* Updated: 2024/03/29 16:49:39 by facetint ### ########.fr */ /* */ /* ************************************************************************** */ @@ -16,10 +16,10 @@ #include "../../memory-allocator/allocator.h" #include "../../includes/utils.h" -int count_escapes(char *str) +int count_escapes(char *str) { - unsigned int i; - int count; + int count; + unsigned int i; i = 0; count = 0; @@ -29,15 +29,15 @@ int count_escapes(char *str) count++; i++; } - return count; + return (count); } -char *escaped_strdup(char *str) +char *escaped_strdup(char *str) { - char *result; - unsigned int i; - unsigned int j; - int length; + char *result; + unsigned int i; + unsigned int j; + int length; length = (int) ft_strlen(str) - count_escapes(str); result = safe_malloc(length + 1); @@ -48,34 +48,36 @@ char *escaped_strdup(char *str) if (str[i] == '\\') { if (str[i + 1] == '\0') - break; + break ; i++; } result[j++] = str[i++]; } result[j] = '\0'; - return result; + return (result); } void unquote(t_token *lexer_data) { char *unquoted_value; + while (lexer_data) { - if (lexer_data->type == SINGLE_QUOTED_WORD || lexer_data->type == DOUBLE_QUOTED_WORD) + if (lexer_data->type == SINGLE_QUOTED_WORD + || lexer_data->type == DOUBLE_QUOTED_WORD) { - unquoted_value = ft_substr(lexer_data->value, 1, ft_strlen(lexer_data->value) - 2); + unquoted_value = ft_substr(lexer_data->value, 1, + ft_strlen(lexer_data->value) - 2); safe_free(lexer_data->value); lexer_data->value = unquoted_value; } - if(lexer_data->type == UNQUOTED_WORD || lexer_data->type == DOUBLE_QUOTED_WORD) + if (lexer_data->type == UNQUOTED_WORD + || lexer_data->type == DOUBLE_QUOTED_WORD) { unquoted_value = escaped_strdup(lexer_data->value); safe_free(lexer_data->value); lexer_data->value = unquoted_value; } - lexer_data = lexer_data->next; } - }
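
Note on the pattern touched by this patch: the lexer is a small state machine driven through function pointers. Each state handler (word_state, operator_state_l, delimiter_state) consumes a slice of the input, may append a token, and returns the next state; lex() simply keeps calling whatever state it was handed until a state returns NULL or the input runs out. The sketch below is a minimal, self-contained illustration of that driving loop only. The typedefs, the two-state set and the printf are simplified stand-ins, not the project's actual lexer_state typedef, token list or quote handling.

/* Minimal sketch of the function-pointer state machine shape used by lex().
** All names here are illustrative stand-ins for the real project types. */
#include <ctype.h>
#include <stdio.h>

/* ISO C cannot declare a function type that returns its own type, so each
** state returns a generic function pointer and the driver casts it back,
** the same trick the patch performs with its (lexer_state) casts. */
typedef void	(*t_next)(void);
typedef t_next	(*t_state)(const char *input, int *i);

static t_next	delim_state(const char *input, int *i);

/* Consume one blank-delimited word and report it, then hand control back
** to the delimiter state. Like the diff's word_state, it leaves *i on the
** last character it consumed; the driver's i++ moves past it. */
static t_next	word_state(const char *input, int *i)
{
	int	start;

	start = *i;
	while (input[*i] && !isspace((unsigned char)input[*i]))
		(*i)++;
	printf("word: %.*s\n", *i - start, input + start);
	(*i)--;
	return ((t_next)delim_state);
}

/* Skip blanks; returning NULL stops the machine at end of input,
** just as delimiter_state and operator_state_l do in the patch. */
static t_next	delim_state(const char *input, int *i)
{
	while (input[*i] == ' ')
		(*i)++;
	if (input[*i] == '\0')
		return (NULL);
	(*i)--;
	return ((t_next)word_state);
}

int	main(void)
{
	const char	*input;
	t_state		state;
	int			i;

	/* Like lex(), assume leading blanks were already skipped and start
	** in the word state; run states until one returns NULL. */
	input = "echo hello   world";
	state = word_state;
	i = 0;
	while (state && input[i])
	{
		state = (t_state)state(input, &i);
		i++;
	}
	return (0);
}

Compiled and run, this prints each blank-separated word on its own line. Returning the next state as a pointer is what keeps each handler short enough for the norminette's function-length limit while lex() stays a four-line loop, and NULL doubling as the stop signal is why operator_state_l and delimiter_state return (NULL) when there is nothing left to scan.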