Skip to content

Commit

Permalink
Remove legacy code and rename Lock_Namespace
Browse files Browse the repository at this point in the history
to legacylockmgr namespace.
  • Loading branch information
alexbaden authored and andrewseidl committed Mar 4, 2020
1 parent 0b7d00b commit acf7232
Show file tree
Hide file tree
Showing 5 changed files with 16 additions and 159 deletions.
60 changes: 2 additions & 58 deletions LockMgr/LegacyLockMgr.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,11 +27,7 @@
#include "Shared/types.h"
#include "ThriftHandler/QueryState.h"

namespace Lock_Namespace {

using namespace rapidjson;
using LockTypeContainer = boost::variant<mapd_shared_lock<mapd_shared_mutex>,
mapd_unique_lock<mapd_shared_mutex>>;
namespace legacylockmgr {

enum LockType { ExecutorOuterLock, LockMax };

Expand Down Expand Up @@ -67,56 +63,4 @@ std::shared_ptr<MutexType> LockMgr<MutexType, KeyType>::getMutex(const LockType
return tMutex;
}

ChunkKey getTableChunkKey(const Catalog_Namespace::Catalog& cat,
const std::string& tableName);
void getTableNames(std::map<std::string, bool>& tableNames, const Value& value);
void getTableNames(std::map<std::string, bool>& tableNames, const std::string query_ra);
std::string parse_to_ra(query_state::QueryStateProxy, const std::string& query_str);

// Resolves a table name to its ChunkKey through the catalog and returns the
// (lazily created) mutex guarding that table. Throws std::runtime_error via
// getTableChunkKey if the table does not exist.
template <typename MutexType>
std::shared_ptr<MutexType> getTableMutex(const Catalog_Namespace::Catalog& cat,
                                         const std::string& tableName,
                                         const Lock_Namespace::LockType lockType) {
  const auto table_key = getTableChunkKey(cat, tableName);
  return Lock_Namespace::LockMgr<MutexType, ChunkKey>::getMutex(lockType, table_key);
}

// Acquires a lock of the requested flavor (shared or unique, per the LockType
// template) on the table's mutex, then re-resolves the table's chunk key.
// The re-resolution throws if the table was dropped while we were waiting for
// the lock, so the table (and, after ALTER, its columns) is guaranteed to
// still exist once the lock is held.
template <typename MutexType, template <typename> class LockType>
LockType<MutexType> getTableLock(const Catalog_Namespace::Catalog& cat,
                                 const std::string& tableName,
                                 const Lock_Namespace::LockType lockType) {
  LockType<MutexType> table_lock(*getTableMutex<MutexType>(cat, tableName, lockType));
  // Existence re-check under the lock; throws if the table vanished.
  getTableChunkKey(cat, tableName);
  return table_lock;
}

// Acquires one lock per entry in tableNames, appending each to tableLocks.
// An entry mapped to true needs a write (unique) lock; false gets a read
// (shared) lock.
template <typename MutexType>
void getTableLocks(const Catalog_Namespace::Catalog& cat,
                   const std::map<std::string, bool>& tableNames,
                   std::vector<std::shared_ptr<LockTypeContainer>>& tableLocks,
                   const Lock_Namespace::LockType lockType) {
  for (const auto& table_entry : tableNames) {
    const auto& name = table_entry.first;
    const bool needs_write_lock = table_entry.second;
    if (needs_write_lock) {
      tableLocks.push_back(std::make_shared<LockTypeContainer>(
          getTableLock<MutexType, mapd_unique_lock>(cat, name, lockType)));
    } else {
      tableLocks.push_back(std::make_shared<LockTypeContainer>(
          getTableLock<MutexType, mapd_shared_lock>(cat, name, lockType)));
    }
  }
}

// Convenience overload: derives the set of referenced tables (and whether
// each is written to) from the serialized relational algebra, then acquires a
// lock per table.
template <typename MutexType>
void getTableLocks(const Catalog_Namespace::Catalog& cat,
                   const std::string& query_ra,
                   std::vector<std::shared_ptr<LockTypeContainer>>& tableLocks,
                   const Lock_Namespace::LockType lockType) {
  std::map<std::string, bool> table_names;  // table name -> needs write lock
  getTableNames(table_names, query_ra);
  getTableLocks<MutexType>(cat, table_names, tableLocks, lockType);
}

} // namespace Lock_Namespace
} // namespace legacylockmgr
65 changes: 0 additions & 65 deletions LockMgr/LockMgr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -40,68 +40,3 @@ ChunkKey chunk_key_for_table(const Catalog_Namespace::Catalog& cat,
} // namespace helpers

} // namespace lockmgr

namespace Lock_Namespace {

using namespace rapidjson;

// Parses a serialized relational algebra tree (JSON) and collects the names
// of every table it references into tableNames; a table maps to true when the
// RA tree modifies it (see the Value overload). CHECK-aborts on malformed or
// non-object JSON input.
void getTableNames(std::map<std::string, bool>& tableNames, const std::string query_ra) {
  rapidjson::Document query_ast;
  query_ast.Parse(query_ra.c_str());
  CHECK(!query_ast.HasParseError());
  CHECK(query_ast.IsObject());
  getTableNames(tableNames, query_ast);
}

// Recursively walks a rapidjson value from an RA tree, recording every table
// referenced by an EnumerableTableScan or LogicalTableModify node.
// tableNames maps table name -> true iff the table is modified (written).
void getTableNames(std::map<std::string, bool>& tableNames, const Value& value) {
  if (value.IsArray()) {
    // Arrays only recurse into their elements; the "rels" inspection below is
    // for objects only, hence the early return.
    for (SizeType i = 0; i < value.Size(); ++i) {
      getTableNames(tableNames, value[i]);
    }
    return;
  } else if (value.IsObject()) {
    // Recurse into every member first; objects then fall through to the
    // "rels" inspection below.
    for (auto mit = value.MemberBegin(); mit != value.MemberEnd(); ++mit) {
      getTableNames(tableNames, mit->value);
    }
  } else {
    // Scalar leaf: nothing to collect.
    return;
  }

  // Only objects reach this point. Skip nodes without a "rels" array.
  if (value.FindMember("rels") == value.MemberEnd()) {
    return;
  }
  const auto& rels = value["rels"];
  CHECK(rels.IsArray());
  for (SizeType i = 0; i < rels.Size(); ++i) {
    const auto& rel = rels[i];
    const auto& relop = json_str(rel["relOp"]);
    if (rel.FindMember("table") != rel.MemberEnd()) {
      if ("EnumerableTableScan" == relop || "LogicalTableModify" == relop) {
        const auto t = rel["table"].GetArray();
        // Index 1 of the "table" array holds the table name — presumably the
        // identifier is [schema/db, table_name]; TODO confirm against Calcite
        // output.
        CHECK(t[1].IsString());
        // |= so a table flagged as modified anywhere in the tree stays
        // flagged even if it is also merely scanned elsewhere.
        tableNames[t[1].GetString()] |= "LogicalTableModify" == relop;
      }
    }
  }
}

// Maps a table name to its {dbId, tableId} chunk key using catalog metadata.
// Throws std::runtime_error when no table with that name exists.
ChunkKey getTableChunkKey(const Catalog_Namespace::Catalog& cat,
                          const std::string& tableName) {
  const auto td = cat.getMetadataForTable(tableName, false);
  if (!td) {
    throw std::runtime_error("Table " + tableName + " does not exist.");
  }
  return ChunkKey{cat.getCurrentDB().dbId, td->tableId};
}

// Runs the query string through Calcite (via the session's catalog) and
// returns the resulting serialized relational algebra plan.
std::string parse_to_ra(query_state::QueryStateProxy query_state_proxy,
                        const std::string& query_str) {
  const auto session = query_state_proxy.getQueryState().getConstSessionInfo();
  const auto& cat = session->getCatalog();
  const auto calcite_result = cat.getCalciteMgr()->process(
      query_state_proxy, query_str, {}, true, false, false, true);
  return calcite_result.plan_result;
}

} // namespace Lock_Namespace
26 changes: 0 additions & 26 deletions LockMgr/LockMgrImpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,6 @@
#include "Catalog/Catalog.h"
#include "Shared/mapd_shared_mutex.h"
#include "Shared/types.h"
namespace Lock_Namespace {
void getTableNames(std::map<std::string, bool>& tableNames, const std::string query_ra);
}
namespace lockmgr {

using MutexType = mapd_shared_mutex;
Expand Down Expand Up @@ -88,29 +85,6 @@ class TableLockMgrImpl {
return table_mutex_map_[table_key];
}

static void getTableLocks(const Catalog_Namespace::Catalog& cat,
const std::map<std::string, bool>& table_names,
std::vector<TableLock>& table_locks) {
for (const auto& table_name_itr : table_names) {
TableLock table_lock;
if (table_name_itr.second) {
table_lock.write_lock = T::getWriteLockForTable(cat, table_name_itr.first);
} else {
table_lock.read_lock = T::getReadLockForTable(cat, table_name_itr.first);
}
table_locks.emplace_back(std::move(table_lock));
}
}

static void getTableLocks(const Catalog_Namespace::Catalog& cat,
const std::string& query_ra,
std::vector<TableLock>& table_locks) {
// parse ra to learn involved table names
std::map<std::string, bool> table_names;
Lock_Namespace::getTableNames(table_names, query_ra);
return T::getTableLocks(cat, table_names, table_locks);
}

static WriteLock getWriteLockForTable(const Catalog_Namespace::Catalog& cat,
const std::string& table_name) {
return helpers::getLockForTableImpl<WriteLock, T>(cat, table_name);
Expand Down
8 changes: 4 additions & 4 deletions Parser/ParserNode.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2924,8 +2924,8 @@ void TruncateTableStmt::execute(const Catalog_Namespace::SessionInfo& session) {
// TODO: Removal of the FileMgr is not thread safe. Take a global system write lock when
// truncating a table
const auto execute_write_lock = mapd_unique_lock<mapd_shared_mutex>(
*Lock_Namespace::LockMgr<mapd_shared_mutex, bool>::getMutex(
Lock_Namespace::ExecutorOuterLock, true));
*legacylockmgr::LockMgr<mapd_shared_mutex, bool>::getMutex(
legacylockmgr::ExecutorOuterLock, true));

const auto td_with_lock =
lockmgr::TableSchemaLockContainer<lockmgr::WriteLock>::acquireTableDescriptor(
Expand Down Expand Up @@ -3550,8 +3550,8 @@ void CopyTableStmt::execute(const Catalog_Namespace::SessionInfo& session,

// Prevent simultaneous import / truncate (see TruncateTableStmt::execute)
const auto execute_read_lock = mapd_shared_lock<mapd_shared_mutex>(
*Lock_Namespace::LockMgr<mapd_shared_mutex, bool>::getMutex(
Lock_Namespace::ExecutorOuterLock, true));
*legacylockmgr::LockMgr<mapd_shared_mutex, bool>::getMutex(
legacylockmgr::ExecutorOuterLock, true));

const TableDescriptor* td{nullptr};
std::unique_ptr<lockmgr::TableSchemaLockContainer<lockmgr::ReadLock>> td_with_lock;
Expand Down
16 changes: 10 additions & 6 deletions ThriftHandler/MapDHandler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,6 @@

using Catalog_Namespace::Catalog;
using Catalog_Namespace::SysCatalog;
using namespace Lock_Namespace;

#define INVALID_SESSION_ID ""

Expand Down Expand Up @@ -994,7 +993,8 @@ void MapDHandler::sql_execute_df(TDataFrame& _return,
_return.execution_time_ms = 0;

mapd_shared_lock<mapd_shared_mutex> executeReadLock(
*LockMgr<mapd_shared_mutex, bool>::getMutex(ExecutorOuterLock, true));
*legacylockmgr::LockMgr<mapd_shared_mutex, bool>::getMutex(
legacylockmgr::ExecutorOuterLock, true));

SQLParser parser;
std::list<std::unique_ptr<Parser::Stmt>> parse_trees;
Expand Down Expand Up @@ -1299,7 +1299,8 @@ void MapDHandler::validate_rel_alg(TTableDescriptor& _return,
QueryStateProxy query_state_proxy) {
try {
const auto execute_read_lock = mapd_shared_lock<mapd_shared_mutex>(
*LockMgr<mapd_shared_mutex, bool>::getMutex(ExecutorOuterLock, true));
*legacylockmgr::LockMgr<mapd_shared_mutex, bool>::getMutex(
legacylockmgr::ExecutorOuterLock, true));

// TODO(adb): for a validate query we do not need write locks, though the lock would
// generally be short lived.
Expand Down Expand Up @@ -5037,7 +5038,8 @@ void MapDHandler::sql_execute_impl(TQueryResult& _return,
ParserWrapper pw{query_str};
if (pw.isCalcitePathPermissable(read_only_)) {
executeReadLock = mapd_shared_lock<mapd_shared_mutex>(
*LockMgr<mapd_shared_mutex, bool>::getMutex(ExecutorOuterLock, true));
*legacylockmgr::LockMgr<mapd_shared_mutex, bool>::getMutex(
legacylockmgr::ExecutorOuterLock, true));

std::string query_ra;
_return.execution_time_ms += measure<>::execution([&]() {
Expand Down Expand Up @@ -5176,7 +5178,8 @@ void MapDHandler::sql_execute_impl(TQueryResult& _return,

// Prevent any other query from running while doing validate
executeWriteLock = mapd_unique_lock<mapd_shared_mutex>(
*LockMgr<mapd_shared_mutex, bool>::getMutex(ExecutorOuterLock, true));
*legacylockmgr::LockMgr<mapd_shared_mutex, bool>::getMutex(
legacylockmgr::ExecutorOuterLock, true));

std::string output{"Result for validate"};
if (g_cluster && leaf_aggregator_.leafCount()) {
Expand Down Expand Up @@ -5322,7 +5325,8 @@ void MapDHandler::sql_execute_impl(TQueryResult& _return,
// write lock could be saving us for now

executeWriteLock = mapd_unique_lock<mapd_shared_mutex>(
*LockMgr<mapd_shared_mutex, bool>::getMutex(ExecutorOuterLock, true));
*legacylockmgr::LockMgr<mapd_shared_mutex, bool>::getMutex(
legacylockmgr::ExecutorOuterLock, true));

CHECK(!checkpoint_lock.mutex());
checkpoint_lock = lockmgr::InsertDataLockMgr::getWriteLockForTable(
Expand Down

0 comments on commit acf7232

Please sign in to comment.