mirror of
https://github.com/jorenchik/mdemory.git
synced 2026-03-22 00:26:21 +00:00
transpiler comments and refactoring
This commit is contained in:
@@ -5,7 +5,6 @@
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <map>
|
||||
#include <algorithm>
|
||||
#include <sstream>
|
||||
#include <format>
|
||||
|
||||
@@ -15,123 +14,85 @@
|
||||
#include "parser.h"
|
||||
#include "stringUtils.h"
|
||||
|
||||
struct QuestionElement {
|
||||
bool isDash;
|
||||
bool isGroup;
|
||||
std::string content;
|
||||
};
|
||||
typedef std::map<TokenType, std::vector<TokenType>> TokenAutomata;
|
||||
|
||||
/// Renders the question as a multi-line debug string: a header line,
/// section, cooldown, the question text, and every choice prefixed with its
/// opener character ('^' for ordered questions, '+' correct, '-' incorrect).
std::string MultiElementQuestion::toString() const {
    std::stringstream choiceList;
    for (const auto& choice : choices) {
        const char opener = (type == MultiElementType::Order)
                                ? '^'
                                : (choice.isCorrect ? '+' : '-');
        choiceList << opener << " " << choice.answer << "; ";
    }
    // NOTE(review): the "id:" slot is filled with `cooldown` — confirm intended.
    return std::format(
        "<Multiple element>\nsection:{}\nid:{}\n{}\n{}",
        section,
        cooldown,
        questionText,
        choiceList.str()
    );
}
|
||||
|
||||
/// Renders the question as a multi-line debug string: a header line,
/// section, cooldown, the question text, and each group as
/// "name: el1, el2, ; ".
std::string GroupQuestion::toString() const {
    std::stringstream ss;
    // Iterate by const reference: `for (auto group : groups)` copied every
    // group (and every element string) on each iteration.
    for (const auto& group : groups) {
        ss << group.name << ": ";
        for (const auto& el : group.elements) {
            ss << el << ", ";
        }
        ss << "; ";
    }
    // NOTE(review): the "id:" slot is filled with `cooldown` — confirm intended.
    return std::format(
        "<GroupQuestion>\nsection:{}\nid:{}\n{}\n{}",
        section,
        cooldown,
        questionText,
        ss.str()
    );
}
|
||||
|
||||
// Automaton for validating token transitions
|
||||
std::map<TokenType, std::vector<TokenType>> automata;
|
||||
|
||||
bool contains(const std::vector<TokenType>& vec, TokenType element) {
|
||||
return std::find(vec.begin(), vec.end(), element) != vec.end();
|
||||
}
|
||||
|
||||
// Automata for validating the parser state
|
||||
std::map<TokenType, std::vector<TokenType>> parserAutomata() {
|
||||
std::map<TokenType, std::vector<TokenType>> automata;
|
||||
automata[TokenType::TextFragment] = {
|
||||
TokenAutomata *automata = nullptr;
|
||||
/*
|
||||
* Galīgs automāts, kas nosaka, kādā secībā ir var būt tekstvienības.
|
||||
* */
|
||||
void initParserAutomata() {
|
||||
automata = new TokenAutomata;
|
||||
(*automata)[TokenType::TextFragment] = {
|
||||
TokenType::QuestionEnd,
|
||||
TokenType::ElementDashStart,
|
||||
TokenType::ElementPlusStart,
|
||||
TokenType::MatchGroupEnd,
|
||||
TokenType::EndOfFile,
|
||||
};
|
||||
automata[TokenType::MatchGroupEnd] = {
|
||||
(*automata)[TokenType::MatchGroupEnd] = {
|
||||
TokenType::ElementDashStart
|
||||
};
|
||||
automata[TokenType::QuestionEnd] = {
|
||||
(*automata)[TokenType::QuestionEnd] = {
|
||||
TokenType::ElementDashStart,
|
||||
TokenType::ElementPlusStart
|
||||
};
|
||||
automata[TokenType::ElementDashStart] = {
|
||||
(*automata)[TokenType::ElementDashStart] = {
|
||||
TokenType::CooldownStart,
|
||||
TokenType::TextFragment,
|
||||
TokenType::ElementOrderModifier
|
||||
};
|
||||
automata[TokenType::ElementOrderModifier] = {
|
||||
(*automata)[TokenType::ElementOrderModifier] = {
|
||||
TokenType::TextFragment
|
||||
};
|
||||
automata[TokenType::ElementPlusStart] = {
|
||||
(*automata)[TokenType::ElementPlusStart] = {
|
||||
TokenType::TextFragment
|
||||
};
|
||||
automata[TokenType::Cooldown] = {
|
||||
(*automata)[TokenType::Cooldown] = {
|
||||
TokenType::CooldownEnd,
|
||||
};
|
||||
automata[TokenType::CooldownStart] = {
|
||||
(*automata)[TokenType::CooldownStart] = {
|
||||
TokenType::Cooldown
|
||||
};
|
||||
automata[TokenType::CooldownEnd] = {
|
||||
(*automata)[TokenType::CooldownEnd] = {
|
||||
TokenType::TextFragment
|
||||
};
|
||||
automata[TokenType::StartOfFile] = {
|
||||
(*automata)[TokenType::StartOfFile] = {
|
||||
TokenType::TextFragment,
|
||||
TokenType::ElementDashStart,
|
||||
TokenType::EndOfFile
|
||||
};
|
||||
automata[TokenType::EndOfFile] = {};
|
||||
return automata;
|
||||
}
|
||||
|
||||
std::string capitalize(const std::string& str) {
|
||||
if (str.empty()) return str;
|
||||
std::string result = str;
|
||||
result[0] = std::towupper(result[0]);
|
||||
return result;
|
||||
(*automata)[TokenType::EndOfFile] = {};
|
||||
}
|
||||
|
||||
/*
|
||||
* Pārbauda, vai vai tekstvienību sarakstu akceptē atbilst atbilst valodas
|
||||
* automāts.
|
||||
* */
|
||||
Result<NoneType> ValidateGrammar(const std::vector<Token>& tokens) {
|
||||
automata = parserAutomata();
|
||||
if (!automata) {
|
||||
initParserAutomata();
|
||||
}
|
||||
for (size_t i = 0; i < tokens.size() - 1; ++i) {
|
||||
Token token = tokens[i];
|
||||
Token nextToken = tokens[i + 1];
|
||||
if (!contains(automata[token.tokenType], nextToken.tokenType)) {
|
||||
if (
|
||||
std::find(
|
||||
(*automata)[token.tokenType].begin(),
|
||||
(*automata)[token.tokenType].end(),
|
||||
nextToken.tokenType
|
||||
) == (*automata)[token.tokenType].end()) {
|
||||
|
||||
auto capitalize = [](const std::string& str) {
|
||||
if (str.empty()) return str;
|
||||
std::string result = str;
|
||||
result[0] = std::towupper(result[0]);
|
||||
return result;
|
||||
};
|
||||
return {
|
||||
.error=std::format(
|
||||
"Invalid token sequence: {} cannot precede {}",
|
||||
std::string(capitalize(Token::ToString(&token.tokenType))),
|
||||
std::string(capitalize(Token::ToString(&nextToken.tokenType)))
|
||||
std::string(capitalize(Token::toString(&token.tokenType))),
|
||||
std::string(capitalize(Token::toString(&nextToken.tokenType)))
|
||||
),
|
||||
.row=token.row,
|
||||
.column=token.column
|
||||
@@ -141,17 +102,6 @@ Result<NoneType> ValidateGrammar(const std::vector<Token>& tokens) {
|
||||
return {};
|
||||
}
|
||||
|
||||
// Parses `datetime` with the std::get_time format string `format` and
// returns the corresponding UTC timestamp (seconds since the Unix epoch).
// Throws std::runtime_error when the string does not match the format.
time_t parseToUTCTime(const std::string datetime, std::string format) {
    std::tm tm = {};
    std::istringstream ss(datetime);
    ss >> std::get_time(&tm, format.c_str());
    if (ss.fail()) {
        throw std::runtime_error("Failed to parse datetime string");
    }
    // timegm() is a non-standard POSIX extension (undeclared under strict
    // -std= modes); convert the broken-down UTC time portably with the
    // days-from-civil algorithm (Howard Hinnant) instead. Results are
    // identical for all dates in the proleptic Gregorian calendar.
    const long long year  = 1900LL + tm.tm_year;
    const unsigned  month = static_cast<unsigned>(tm.tm_mon) + 1;  // 1..12
    const unsigned  day   = static_cast<unsigned>(tm.tm_mday);     // 1..31
    const long long y     = year - (month <= 2 ? 1 : 0);
    const long long era   = (y >= 0 ? y : y - 399) / 400;
    const unsigned  yoe   = static_cast<unsigned>(y - era * 400);            // year of era [0, 399]
    const unsigned  doy   = (153 * (month > 2 ? month - 3 : month + 9) + 2) / 5
                            + day - 1;                                       // day of year [0, 365]
    const unsigned  doe   = yoe * 365 + yoe / 4 - yoe / 100 + doy;           // day of era
    const long long days  = era * 146097 + static_cast<long long>(doe) - 719468;
    const long long secs  = days * 86400LL
                          + tm.tm_hour * 3600LL + tm.tm_min * 60LL + tm.tm_sec;
    return static_cast<std::time_t>(secs);
}
|
||||
|
||||
// @Fix: Prevent duplicate group names and questions in ordered question (to
|
||||
// simplify checking in practice).
|
||||
Result<ParseInfo> parseQuestions(const std::vector<Token>& tokens) {
|
||||
@@ -192,6 +142,17 @@ Result<ParseInfo> parseQuestions(const std::vector<Token>& tokens) {
|
||||
|
||||
if (isInBounds(i) && tokens[i].tokenType == TokenType::TextFragment) {
|
||||
try {
|
||||
auto parseToUTCTime = [](const std::string datetime, std::string format) {
|
||||
std::tm tm = {};
|
||||
std::istringstream ss(datetime);
|
||||
ss >> std::get_time(&tm, format.c_str());
|
||||
if (ss.fail()) {
|
||||
throw std::runtime_error("Failed to parse datetime string");
|
||||
}
|
||||
std::time_t time = timegm(&tm);
|
||||
return time;
|
||||
};
|
||||
|
||||
time = parseToUTCTime(tokens[i].content.c_str(), "%d.%m.%Y %H:%M");
|
||||
} catch (std::exception e) {
|
||||
return makeResult(
|
||||
@@ -396,3 +357,43 @@ Result<ParseInfo> parseQuestions(const std::vector<Token>& tokens) {
|
||||
Token()
|
||||
);
|
||||
}
|
||||
|
||||
/// Produces a multi-line debug representation: header, section, cooldown,
/// question text, then every choice preceded by its opener character
/// ('^' for ordered questions, '+' correct, '-' incorrect).
std::string MultiElementQuestion::toString() const {
    std::stringstream rendered;
    for (const auto& choice : choices) {
        char marker;
        if (type == MultiElementType::Order) {
            marker = '^';
        } else {
            marker = choice.isCorrect ? '+' : '-';
        }
        rendered << marker << " " << choice.answer << "; ";
    }
    // NOTE(review): the "id:" slot is filled with `cooldown` — confirm intended.
    return std::format(
        "<Multiple element>\nsection:{}\nid:{}\n{}\n{}",
        section,
        cooldown,
        questionText,
        rendered.str()
    );
}
|
||||
|
||||
/// Produces a multi-line debug representation: header, section, cooldown,
/// question text, then each group rendered as "name: el1, el2, ; ".
std::string GroupQuestion::toString() const {
    std::stringstream ss;
    // Iterate by const reference: `for (auto group : groups)` copied every
    // group (and every element string) on each iteration.
    for (const auto& group : groups) {
        ss << group.name << ": ";
        for (const auto& el : group.elements) {
            ss << el << ", ";
        }
        ss << "; ";
    }
    // NOTE(review): the "id:" slot is filled with `cooldown` — confirm intended.
    return std::format(
        "<GroupQuestion>\nsection:{}\nid:{}\n{}\n{}",
        section,
        cooldown,
        questionText,
        ss.str()
    );
}
|
||||
|
||||
Reference in New Issue
Block a user