diff --git a/src/cpp/include/lexer.h b/src/cpp/include/lexer.h index 10cebb1..94443bd 100644 --- a/src/cpp/include/lexer.h +++ b/src/cpp/include/lexer.h @@ -8,8 +8,10 @@ enum class TokenType { TextFragment, QuestionEnd, + MatchGroupEnd, ElementDashStart, ElementPlusStart, + ElementOrderModifier, Identifier, IdentifierStart, IdentifierEnd, diff --git a/src/cpp/include/parser.h b/src/cpp/include/parser.h index 610f5a6..d903182 100644 --- a/src/cpp/include/parser.h +++ b/src/cpp/include/parser.h @@ -6,32 +6,44 @@ #include "lexer.h" #include "result.h" + struct Question { virtual std::string ToString() const = 0; - virtual ~Question() = default; + virtual ~Question() = default; }; struct Choice { std::string Answer; - bool IsCorrect; + bool IsCorrect; }; -struct SingleAnswerQuestion : public Question { - std::string ID; - std::string QuestionText; - std::string Answer; - std::string Section; - - std::string ToString() const override; +enum MultiElementType { + Regular = 0, + MultiChoice, + Order }; -struct MultipleChoiceQuestion : public Question { - std::string ID; - std::string QuestionText; - std::vector<Choice> Choices; - std::string Section; +struct MultiElementQuestion : public Question { + std::string ID; + std::string QuestionText; + std::vector<Choice> Choices; + std::string Section; + MultiElementType type; - std::string ToString() const override; + std::string ToString() const override; +}; + +struct Group { + std::string name; + std::string elements; +}; + +struct GroupQuestion : public Question { + std::string ID; + std::string QuestionText; + std::vector<Group> Groups; + + std::string ToString() const override; }; Result<std::vector<Question*>> ParseQuestions(const std::vector<Token>& tokens); diff --git a/src/cpp/qtapp/.cache/clangd/index/main.cpp.6DE93E662B25E657.idx b/src/cpp/qtapp/.cache/clangd/index/main.cpp.6DE93E662B25E657.idx index f2fa273..e73b53f 100644 Binary files a/src/cpp/qtapp/.cache/clangd/index/main.cpp.6DE93E662B25E657.idx and 
b/src/cpp/qtapp/.cache/clangd/index/main.cpp.6DE93E662B25E657.idx differ diff --git a/src/cpp/qtapp/main.cpp b/src/cpp/qtapp/main.cpp index 5f49a18..c727fc2 100644 --- a/src/cpp/qtapp/main.cpp +++ b/src/cpp/qtapp/main.cpp @@ -199,17 +199,7 @@ void CreateMdems(std::vector& questions) { }; for (size_t i = 0; i < questions.size(); ++i) { - if (SingleAnswerQuestion* sa = dynamic_cast(questions[i])) { - mdems[i]->wFrontText->setText(QString::fromStdString(sa->QuestionText)); - auto answer = sa->Answer; - answer = transformAnswer(answer); - answer = std::format("- {}", answer); - mdems[i]->backLabels[0]->setText(QString::fromStdString(answer)); - if (mdems[i]->wBack->isVisible()) { - mdems[i]->wBack->hide(); - } - mdems[i]->labelCount = 1; - } else if (MultipleChoiceQuestion* mw = dynamic_cast(questions[i])) { + if (AnswerQuestion* mw = dynamic_cast(questions[i])) { mdems[i]->wFrontText->setText( QString::fromStdString(mw->QuestionText) ); diff --git a/src/cpp/transpiler/lexer.cpp b/src/cpp/transpiler/lexer.cpp index 81446fd..32049ff 100644 --- a/src/cpp/transpiler/lexer.cpp +++ b/src/cpp/transpiler/lexer.cpp @@ -199,6 +199,26 @@ Result> TokenizeMdem(const std::string& fileRunes) { previousColumn = column; textStarted = false; break; + case '^': + makeTokenWithTokenBuffer( + TokenType::ElementOrderModifier, + 1, + TokenType::TextFragment + ); + previousRow = row; + previousColumn = column; + textStarted = false; + break; + case ':': + makeTokenWithTokenBuffer( + TokenType::MatchGroupEnd, + 1, + TokenType::TextFragment + ); + previousRow = row; + previousColumn = column; + textStarted = false; + break; case '>': makeTokenWithTokenBuffer( TokenType::QuestionEnd, @@ -255,7 +275,9 @@ std::string Token::ToString(const TokenType* ttype) { switch (*ttype) { case TokenType::TextFragment: return "text fragment"; case TokenType::QuestionEnd: return "question end symbol"; + case TokenType::MatchGroupEnd: return "match group end"; case TokenType::ElementDashStart: return "dash 
element start"; + case TokenType::ElementOrderModifier: return "order element modifier"; case TokenType::ElementPlusStart: return "plus element start"; case TokenType::Identifier: return "identifier"; case TokenType::IdentifierStart: return "start of identifier"; diff --git a/src/cpp/transpiler/parser.cpp b/src/cpp/transpiler/parser.cpp index ce6f46e..fdbbf91 100644 --- a/src/cpp/transpiler/parser.cpp +++ b/src/cpp/transpiler/parser.cpp @@ -14,20 +14,11 @@ struct QuestionElement { bool isDash; + bool isGroup; std::string content; }; -std::string SingleAnswerQuestion::ToString() const { - return std::format( - ":{} section:{} id:{} answer:{}", - QuestionText, - Section, - ID, - Answer - ); -} - -std::string MultipleChoiceQuestion::ToString() const { +std::string MultiElementQuestion::ToString() const { std::stringstream choiceOut; for (const auto& choice : Choices) { char opener; @@ -61,18 +52,26 @@ std::map> parserAutomata() { TokenType::QuestionEnd, TokenType::ElementDashStart, TokenType::ElementPlusStart, + TokenType::MatchGroupEnd, TokenType::SectionIdentifierStart, TokenType::SectionStart, TokenType::EndOfFile, TokenType::SectionEnd }; + automata[TokenType::MatchGroupEnd] = { + TokenType::ElementDashStart + }; automata[TokenType::QuestionEnd] = { TokenType::ElementDashStart, TokenType::ElementPlusStart }; automata[TokenType::ElementDashStart] = { TokenType::IdentifierStart, - TokenType::TextFragment + TokenType::TextFragment, + TokenType::ElementOrderModifier + }; + automata[TokenType::ElementOrderModifier] = { + TokenType::TextFragment }; automata[TokenType::ElementPlusStart] = { TokenType::TextFragment @@ -165,7 +164,10 @@ Result> ParseQuestions(const std::vector& tokens) std::string id, questionText; std::vector questionElements; - // Parsing for a single question or multiple choice question + bool isOrderQuestion = false; + bool isGroupQuestion = false; + + // Start element parsing & add to the offset. 
if (tokens[i + 1].tokenType == TokenType::IdentifierStart) { id = tokens[i + 2].content; questionText = tokens[i + 4].content; @@ -176,17 +178,30 @@ Result> ParseQuestions(const std::vector& tokens) i += 3; } - while (true) { - if (i + 3 < tokens.size() && tokens[i + 3].tokenType != TokenType::EndOfFile) { - size_t offset = tokens[i + 1].tokenType == TokenType::IdentifierStart ? 5 : 2; - if (tokens[i].tokenType == TokenType::SectionIdentifierStart || - tokens[i].tokenType == TokenType::SectionEnd) { - break; - } - if (i + offset < tokens.size() && tokens[i + offset].tokenType == TokenType::QuestionEnd) { + auto isInBounds = [tokens](size_t i) { + return i < tokens.size() && tokens[i].tokenType != TokenType::EndOfFile; + }; + + // Parse elements of a question. + while (isInBounds(i)) { + + // Handle other constructs. + if (tokens[i].tokenType == TokenType::SectionIdentifierStart) { + break; + } + if (tokens[i].tokenType == TokenType::SectionEnd) { + break; + } + + // Check question end. + if (tokens[i].tokenType == TokenType::ElementDashStart && isInBounds(i + 3)) { + // Distance to the possible question end. + size_t offset = tokens[i + 1].tokenType == TokenType::IdentifierStart ? 5 : 2; + if (isInBounds(i + offset) && tokens[i + offset].tokenType == TokenType::QuestionEnd) { break; } if (offset == 5 && tokens[i + 5].tokenType != TokenType::QuestionEnd) { + // Cannot place the identifier on the ordinary element. return { questions, "Invalid identifier placement", @@ -195,42 +210,72 @@ Result> ParseQuestions(const std::vector& tokens) }; } } - if (i + 2 >= tokens.size()) { - break; - } + + // Determine element type. 
+ bool isDash = false; + bool isGroup = false; + if (isInBounds(i+1) && tokens[i].tokenType == TokenType::ElementOrderModifier) { + isOrderQuestion = true; + if (!isDash) { + // TODO: err + } + } + if (tokens[i].tokenType == TokenType::ElementDashStart) { + isDash = true; + if (isOrderQuestion) { + // TODO: err + } + } else { + isDash = false; + } + if (isInBounds(i+2) && tokens[i].tokenType == TokenType::MatchGroupEnd) { + isGroup = true; + isGroupQuestion = true; + if (!isDash) { + // TODO: err + } + if (isOrderQuestion) { + // TODO: err + } + } - // Create question elements QuestionElement questionElement; - questionElement.isDash = (tokens[i].tokenType == TokenType::ElementDashStart); + questionElement.isDash = isDash; + questionElement.isGroup = isGroup; questionElement.content = tokens[i + 1].content; questionElements.push_back(questionElement); - i += 2; + + size_t offset = 2; + if (isOrderQuestion) { + offset += 1; + } + if (isGroup) { + offset += 1; + } + + i += offset; } if (questionElements.size() > 1) { - auto* mcq = new MultipleChoiceQuestion(); - mcq->ID = id; - mcq->QuestionText = questionText; - for (const auto& elem : questionElements) { - Choice choice; - choice.Answer = elem.content; - choice.IsCorrect = !elem.isDash; - mcq->Choices.push_back(choice); - } - mcq->Section = section; - questions.push_back(mcq); - if (debug) { - std::cout << mcq->ToString() << "\n"; - } - } else if (questionElements.size() == 1) { - auto* saq = new SingleAnswerQuestion(); - saq->ID = id; - saq->QuestionText = questionText; - saq->Answer = questionElements[0].content; - saq->Section = section; - questions.push_back(saq); - if (debug) { - std::cout << saq->ToString() << "\n"; + if (isGroupQuestion) { + GroupQuestion *question = new GroupQuestion(); + // TODO: build GroupQuestion from elements; 'question' currently leaks (never pushed) + + } else if (isOrderQuestion) { + auto *question = new MultiElementQuestion(); + question->ID = id; + question->QuestionText = questionText; + for (const auto& elem : questionElements) { + Choice choice; + choice.Answer = 
elem.content; + choice.IsCorrect = !elem.isDash; + question->Choices.push_back(choice); + } + question->Section = section; + questions.push_back(question); + if (debug) { + std::cout << question->ToString() << "\n"; + } } } } else if (tokens[i].tokenType == TokenType::SectionIdentifierStart) { diff --git a/tasks.md b/tasks.md new file mode 100644 index 0000000..9caf4d6 --- /dev/null +++ b/tasks.md @@ -0,0 +1,37 @@ +## 1-N Answer question + +- kfoewf > + - fiewfiwe + +- kfoewf > + - fiewfiwe + - fewpfowe + - fweofopew + - ofpewpofkew + +## Order question + +- ewjpfwe > + ^- ewijfew + ^- ewijfew + ^- ewijfew + ^- ewijfew + ^- ewijfew + +## Match question + +- iowjefew > + - fiwfo: + - fewfew + - ifoewf + - ejpfe: + - _ + - fewfw: + - fewjpfe + - fioewf + +- [ ] Augment the lexer for Order and match question; +- [ ] Parse the questions accordingly; +- [ ] Escape character; + +