Merge pull request #4 from facetint/hamza-cskn
merge Hamza cskn
facetint authored Feb 24, 2024
2 parents a061b24 + b6f3e45 commit 54804a1
Showing 19 changed files with 1,077 additions and 204 deletions.
Makefile: 37 changes (26 additions, 11 deletions)
@@ -1,28 +1,43 @@

NAME = minishell

# Path to the libft.a archive
LIBFT_DIR = ./libft
LIBFT_PATH = $(LIBFT_DIR)/libft.a
MEMORY_ALLOCATOR_SOURCES = memory-allocator/aborter.c memory-allocator/allocator.c
SOURCES = main.c utils.c lexer.c unquote.c lexer_utils.c parser.c expander.c splitter.c syntax_analyzer.c $(MEMORY_ALLOCATOR_SOURCES)

CC = gcc
FLAGS = -Wall -Wextra -Werror -g
OBJECTS = $(SOURCES:.c=.o)

all: minishell
MEMORY_ALLOCATOR_SOURCES = memory-allocator/aborter.c memory-allocator/allocator.c
SOURCES = expander/expander_nonvariables.c utils.c handler.c lexer.c unquote.c lexer_utils.c parser.c expander.c splitter.c syntax_analyzer.c $(MEMORY_ALLOCATOR_SOURCES)

MINISHELL_SOURCES = main.c $(SOURCES)
MINISHELL_OBJECTS = $(MINISHELL_SOURCES:.c=.o)

TEST_PATH = tests
TEST_SOURCES = $(wildcard $(TEST_PATH)/*.c)
TEST_OBJECTS = $(TEST_SOURCES:.c=.o)

all: $(NAME)

$(TEST_PATH):
mkdir $(TEST_PATH)

test: $(TEST_PATH) $(NAME)
$(CC) $(SOURCES:.c=.o) $(LIBFT_PATH) $(TEST_SOURCES) -o $(TEST_PATH)/tests -lcriterion -L/usr/local/lib -I/usr/local/include -lreadline
./$(TEST_PATH)/tests

$(LIBFT_PATH):
make -C $(LIBFT_DIR)

minishell: $(LIBFT_PATH) $(OBJECTS)
gcc $(FLAGS) -o $(NAME) $(OBJECTS) $(LIBFT_PATH) -L/usr/local/lib -I/usr/local/include -lreadline
$(NAME): $(LIBFT_PATH) $(MINISHELL_OBJECTS)
$(CC) $(FLAGS) -o $(NAME) $(MINISHELL_OBJECTS) $(LIBFT_PATH) -L/usr/local/lib -I/usr/local/include -lreadline

%.o: %.c
gcc $(FLAGS) -c $< -o $@
$(CC) $(FLAGS) -c $< -o $@

clean:
rm -f $(OBJECTS)
rm -f $(MINISHELL_OBJECTS)

fclean: clean
rm -f $(NAME)
rm -f $(NAME) $(TEST_PATH)/tests

re: fclean all
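
The new test target compiles everything under tests/ directly, links it against the project objects, Criterion, and readline, and runs the resulting binary. A test source this target could pick up might look like the sketch below; the file name, the suite/test names, and the assumption that lex(), the token types, and uninit_tokens() are visible through minishell.h are mine, not part of this commit.

/* tests/lexer_test.c — hypothetical sketch, not part of this commit */
#include <criterion/criterion.h>
#include "../minishell.h"

Test(lexer, first_token_is_the_command_word)
{
	t_token	*tokens;

	tokens = lex("echo hello | cat");
	cr_assert_not_null(tokens);
	cr_assert_eq(tokens->type, UNQUOTED_WORD);
	cr_assert_str_eq(tokens->value, "echo");
	uninit_tokens(tokens);
}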
expander.c: 28 changes (8 additions, 20 deletions)
@@ -17,7 +17,7 @@ char *replace_string(char *input, int p_start, int p_len, char *replacement)
char *head;
char *tail;
char *result;

head = ft_substr(input, 0, p_start);
tail = ft_substr(input, p_start + p_len + 1, ft_strlen(input) - p_start - p_len);
if (replacement == NULL)
@@ -78,13 +78,13 @@ void expand_all_variables(char **string)
*string = str;
}



void expand(t_token **head)
{
t_token *token;
t_token **next_ptr;

token = *head;
next_ptr = head;
while (token)
{
/* only unquoted word and double-quoted word tokens are expandable. */
@@ -93,34 +93,22 @@ void expand(t_token **head)
expand_all_variables(&token->value);
/* only unquoted words are not protected for the split */
if (token->type == UNQUOTED_WORD)
internal_field_split(head, token);
internal_field_split(next_ptr);
}
next_ptr = &token->next;
token = token->next;
}
}

void internal_field_split(t_token **head, t_token *token)
void internal_field_split(t_token **token_ptr)
{
char **new_words;
int i;
t_token *list;
t_token *new;
t_token *token = *token_ptr;

new_words = str_split(token->value, is_internal_field_sep);
if (str_arr_size(new_words) == 1)
return; /* there is no new word */
safe_free(token->value);
token->value = new_words[0];
list = NULL;
i = 1; //skip first word
while (new_words[i])
{
new = lexer_data_new((t_token) {.type = DELIMITER});
lexer_data_append(&list, new);
new = lexer_data_new((t_token) {.type = UNQUOTED_WORD, .value = new_words[i]});
lexer_data_append(&list, new);
i++;
}
insert_uword_tokens(token_ptr, new_words);
safe_free(new_words);
lexer_data_insert(find_pointer_to_next(head, token), list);
}
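
For context on the splitting step above: str_split() is driven by the is_internal_field_sep predicate, whose definition is not part of this diff. A plausible sketch of such a predicate, assuming str_split() expects a function returning nonzero for separator characters (the repository's real predicate lives elsewhere and may differ):

/* hypothetical sketch — the real is_internal_field_sep is defined elsewhere */
int	is_internal_field_sep(char c)
{
	return (c == ' ' || c == '\t' || c == '\n');
}

With a predicate like this, a token whose value expanded to "a  b c" would be split into {"a", "b", "c", NULL}: the first word stays in the existing token, and insert_uword_tokens() splices DELIMITER/UNQUOTED_WORD pairs for the remaining words after it.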
expander/expander_nonvariables.c: 46 changes (46 additions, 0 deletions)
@@ -0,0 +1,46 @@
#include "../minishell.h"
#include "../utils.h"
#include "../libft/libft.h"
#include "../char_classification.h"
#include "../memory-allocator/allocator.h"
#include <stdio.h>

/**
* Removes the token from the list and inserts unquoted word tokens in its place.
*
* Example:
*                  k    l    m
* assume we have: UW D UW D UW
* and we call this function with the token 'l'
* with the strings: {"a", "b", "c", "d"}
*
*                                   a    b    c    d
* 1. a token list will be created: UW D UW D UW D UW
* 2. the token 'l' will be removed.
* 3. the token list will be inserted in place of the removed token.
*          k    a    b    c    d    m
* Result: UW D UW D UW D UW D UW D UW
*              ~~~~~~~~~~~~~~~~~
*/
void insert_uword_tokens(t_token **token_ptr, char **strings)
{
t_token *token;
t_token *list;
t_token *new;
int i;

token = *token_ptr;
token->value = strings[0];
list = NULL;
i = 1;
while (strings[i])
{
new = lexer_data_new((t_token) {.type = DELIMITER, .next = NULL, .value = NULL});
lexer_data_append(&list, new);
new = lexer_data_new((t_token) {.type = UNQUOTED_WORD, .next = NULL, .value = strings[i]});
lexer_data_append(&list, new);
i++;
}
lexer_data_insert(token, list);
}
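
A hedged usage sketch of insert_uword_tokens(), mirroring the diagram in the comment above. The driver function and the literal string array are illustrative only; in the real flow the strings come from str_split() and are heap-allocated.

/* hypothetical driver — ptr_to_l points at the UW("l") node of the example */
static void	split_example(t_token **ptr_to_l)
{
	char	*words[] = {"a", "b", "c", "d", NULL};

	/* before: UW("k") -> D -> UW("l") -> D -> UW("m") */
	insert_uword_tokens(ptr_to_l, words);
	/* after:  UW("k") -> D -> UW("a") -> D -> UW("b") -> D -> UW("c")
	**         -> D -> UW("d") -> D -> UW("m") */
}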

handler.c: 150 changes (150 additions, 0 deletions)
@@ -0,0 +1,150 @@
#include "minishell.h"
#include "libft/libft.h"
#include <stdio.h> /* printf in debug() */
#include <limits.h> /* INT_MAX in read_heredoc_input() */
#include <readline/readline.h>
#include "memory-allocator/allocator.h"

/* debug function */
char const *token_type_to_string(t_token_type type)
{
if (type == UNKNOWN)
return "UNKNOWN";
if (type == UNQUOTED_WORD)
return "UNQUOTED_WORD";
if (type == DOUBLE_QUOTED_WORD)
return "DOUBLE_QUOTED_WORD";
if (type == SINGLE_QUOTED_WORD)
return "SINGLE_QUOTED_WORD";
if (type == PIPE)
return "PIPE";
if (type == INPUT_REDIRECTION)
return "INPUT_REDIRECTION";
if (type == HEREDOC_REDIRECTION)
return "HEREDOC_REDIRECTION";
if (type == OUTPUT_REDIRECTION)
return "OUTPUT_REDIRECTION";
if (type == APPEND_REDIRECTION)
return "APPEND_REDIRECTION";
if (type == DELIMITER)
return "DELIMITER";
return "UNRECOGNIZED";
}

# define LEXER_DEBUG
# define PARSER_DEBUG
void debug(t_token *token, t_command *cmd) {
(void) token,(void) cmd;

#ifdef PARSER_DEBUG
if (!cmd) {
printf("<No Command>");
return;
}
while (cmd)
{
printf("name: %s\nargs:", *cmd->name ? cmd->name : "(empty)");
if (cmd->args[0] == NULL)
printf("(no args)");
for (int i = 0; cmd->args[i]; i++)
printf("`%s` ", cmd->args[i]);
printf("\nredirections: ");
if (cmd->redirections[0].redirected == NULL)
printf("(no redirections)");
for (int i = 0; cmd->redirections[i].redirected; i++)
printf("`%s`(%s,%s,%s) ", cmd->redirections[i].redirected,
cmd->redirections[i].flags & INPUT ? "input" : "output",
cmd->redirections[i].flags & APPEND ? "append" : "no append",
cmd->redirections[i].flags & HEREDOC ? "heredoc" : "no heredoc");
printf("\n");
cmd = cmd->next;
}
#endif
#ifdef LEXER_DEBUG
if (!token) {
printf("<No Token>\n");
return;
}
while (token->next) {
if (is_word(token->type))
printf("\033[97m%s\033[37m(%s)->", token_type_to_string(token->type), token->value);
else
printf("\033[97m%s\033[37m->", token_type_to_string(token->type));
token = token->next;
}
if (is_word(token->type))
printf("\033[97m%s\033[37m(%s)\033[97m\n", token_type_to_string(token->type), token->value);
else
printf("\033[97m%s\n", token_type_to_string(token->type));
#endif
}

char *read_heredoc_input(char *eof)
{
char *line;
char *input;

line = NULL;
input = ft_strdup("");
while (1)
{
line = readline("> ");
if (!line || !*line || ft_strncmp(line, eof, INT_MAX) == 0)
break;
char *new = ft_str_arr_join((char *[]) {input, line, "\n"}, 3);
safe_free(input);
input = new;
safe_free(line);
}
char *new = ft_substr(input, 0, ft_strlen(input) - 1);
safe_free(input);
return new;
}

void handle_heredocs(t_command *cur)
{
int i;

while (cur)
{
i = 0;
while (cur->redirections[i].redirected)
{
if (cur->redirections[i].flags & HEREDOC)
{
char *input = read_heredoc_input(cur->redirections[i].redirected);
safe_free(cur->redirections[i].redirected);
cur->redirections[i].redirected = input;
}
i++;
}
cur = cur->next;
}
}

void handle_invalid_input(t_token *lexical_data)
{
ft_putstr_fd("\033[91mInvalid input\n\033[39m", 2);
uninit_tokens(lexical_data);
}

void handle_input(char *input)
{
t_token *lexer_data;
t_command *parser_data;

lexer_data = lex(input);
if (!is_valid(lexer_data))
return handle_invalid_input(lexer_data);
expand(&lexer_data);
unquote(lexer_data);
parser_data = parse(lexer_data);
handle_heredocs(parser_data);

debug(lexer_data, parser_data);
uninit_tokens(lexer_data);
}

void handle_memory_error(void)
{
ft_putstr_fd("Insufficent memory! Minishell aborting...", 2);
exit(1);
}
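
main.c is not shown in this commit, so for orientation here is a minimal sketch of how handle_input() might be driven from a read-eval loop. It assumes handle_input() is declared in minishell.h and uses only readline/add_history from the readline library; the prompt string is made up.

/* hypothetical REPL sketch — the real main.c is not part of this diff */
#include "minishell.h"
#include <readline/readline.h>
#include <readline/history.h>
#include <stdlib.h>

int	main(void)
{
	char	*line;

	while ((line = readline("minishell$ ")) != NULL)
	{
		if (*line)
		{
			add_history(line);
			handle_input(line);
		}
		free(line);
	}
	return (0);
}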
lexer.c: 21 changes (14 additions, 7 deletions)
@@ -30,7 +30,8 @@ t_token_type get_meta_token_type(const char *input)
return (UNKNOWN);
}

lexer_state word_state(t_token **lexer_data, char *input, int *const index) {
lexer_state word_state(t_token **lexer_data, char *input, int *const index)
{
t_token token;
int start_index;

@@ -53,7 +54,8 @@ lexer_state word_state(t_token **lexer_data, char *input, int *const index) {
(*index)--;
}
token.value = ft_substr(input, start_index, *index - start_index + 1);
lexer_data_append(lexer_data, lexer_data_new(token));
if (ft_strlen(token.value) > 0)
lexer_data_append(lexer_data, lexer_data_new(token));
return ((lexer_state) delimiter_state);
}

@@ -86,7 +88,7 @@ lexer_state operator_state_l(t_token **lexer_data, char *input, int *const index
else if (token.type == APPEND_REDIRECTION || token.type == HEREDOC_REDIRECTION)
length = 2;
else
return NULL; //do not append new token
return NULL; // do not append new token
*index = *index + length - 1; // to mark where we stopped scanning
token.value = NULL;
lexer_data_append(lexer_data, lexer_data_new(token));
@@ -110,19 +112,24 @@ lexer_state delimiter_state(t_token **lexer_data, char *input, int *const index)
token.type = DELIMITER;
lexer_data_append(lexer_data, lexer_data_new(token));
}

else
{
*index = *index - 1;
}
if (is_meta_char(next_non_whitespace))
return (lexer_state) operator_state_l;
return ((lexer_state) word_state);
}

t_token *lex(char *input) {
t_token *lex(char *input)
{
t_token *token = NULL;
lexer_state next_state = (lexer_state) word_state;
int index;

index = 0;
while (next_state && input[index]) {
index = skip_white_spaces(input);
while (next_state && input[index])
{
next_state = (lexer_state) next_state(&token, input, &index);
index++;
}
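
lex() above drives a small state machine in which each handler returns the next handler (or NULL to stop). A generic, self-contained illustration of that pattern follows; the types and states here are hypothetical and deliberately simpler than the repo's word/operator/delimiter states.

/* generic function-pointer state machine, in the style of lex() */
#include <stdio.h>

typedef void	*(*t_state)(const char *input, int *index);

static void	*space_state(const char *input, int *index);

static void	*word_state_demo(const char *input, int *index)
{
	while (input[*index] && input[*index] != ' ')
		(*index)++;
	return ((void *)space_state);
}

static void	*space_state(const char *input, int *index)
{
	while (input[*index] == ' ')
		(*index)++;
	return ((void *)word_state_demo);
}

int	main(void)
{
	const char	*input = "echo hello world";
	int			index;
	t_state		state;

	index = 0;
	state = word_state_demo;
	while (state && input[index])
		state = (t_state)state(input, &index);
	printf("scanned %d characters\n", index);
	return (0);
}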
