mirror of https://github.com/FirebirdSQL/firebird.git (synced 2025-01-23 18:03:04 +01:00)
Commit the forgotten files.
This commit is contained in:
parent e342019993
commit 49f2ecf703
373    src/jrd/GlobalRWLock.cpp    Normal file
@@ -0,0 +1,373 @@
/*
 *  PROGRAM:     JRD Access Method
 *  MODULE:      GlobalRWLock.cpp
 *  DESCRIPTION: GlobalRWLock
 *
 *  The contents of this file are subject to the Initial
 *  Developer's Public License Version 1.0 (the "License");
 *  you may not use this file except in compliance with the
 *  License. You may obtain a copy of the License at
 *  http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
 *
 *  Software distributed under the License is distributed AS IS,
 *  WITHOUT WARRANTY OF ANY KIND, either express or implied.
 *  See the License for the specific language governing rights
 *  and limitations under the License.
 *
 *  The Original Code was created by Nickolay Samofatov
 *  for the Firebird Open Source RDBMS project.
 *
 *  Copyright (c) 2006 Nickolay Samofatov
 *  and all contributors signed below.
 *
 *  All Rights Reserved.
 *  Contributor(s): ______________________________________.
 *
 */

#include "firebird.h"
#include "GlobalRWLock.h"
#include "../lock/lock.h"
#include "../lock/lock_proto.h"
#include "iberr_proto.h"
#include "isc_proto.h"
#include "jrd.h"
#include "lck_proto.h"
#include "err_proto.h"

#ifdef COS_DEBUG
IMPLEMENT_TRACE_ROUTINE(cos_trace, "COS")
#endif

namespace Jrd {

int GlobalRWLock::blocking_ast_cached_lock(void* ast_object)
{
    Jrd::GlobalRWLock *GlobalRWLock =
        static_cast<Jrd::GlobalRWLock*>(ast_object);

    ISC_ast_enter();

    /* Since this routine will be called asynchronously, we must establish
       a thread context. */
    Jrd::thread_db thd_context, *tdbb;
    JRD_set_thread_data(tdbb, thd_context);

    ISC_STATUS_ARRAY ast_status;
    Jrd::Database* dbb = GlobalRWLock->cached_lock->lck_dbb;

    tdbb->tdbb_database = dbb;
    tdbb->tdbb_attachment = NULL;
    tdbb->tdbb_quantum = QUANTUM;
    tdbb->tdbb_request = NULL;
    tdbb->tdbb_transaction = NULL;
    tdbb->tdbb_status_vector = ast_status;

    GlobalRWLock->blockingAstHandler(tdbb);

    /* Restore the prior thread context */
    JRD_restore_thread_data();

    ISC_ast_exit();

    return 0;
}

GlobalRWLock::GlobalRWLock(thread_db* tdbb, MemoryPool& p, locktype_t lckType,
        size_t lockLen, const UCHAR* lockStr, lck_owner_t physical_lock_owner,
        lck_owner_t default_logical_lock_owner, bool lock_caching)
    : PermanentStorage(p), internal_blocking(0), external_blocking(false),
      physicalLockOwner(physical_lock_owner), defaultLogicalLockOwner(default_logical_lock_owner),
      lockCaching(lock_caching), readers(p)
{
    SET_TDBB(tdbb);

    Database* dbb = tdbb->tdbb_database;

    cached_lock = FB_NEW_RPT(getPool(), lockLen) Lock();
    cached_lock->lck_type = static_cast<Jrd::lck_t>(lckType);
    cached_lock->lck_owner_handle = 0;
    cached_lock->lck_length = lockLen;

    cached_lock->lck_dbb = dbb;
    cached_lock->lck_parent = dbb->dbb_lock;
    cached_lock->lck_object = reinterpret_cast<blk*>(this);
    cached_lock->lck_ast = lockCaching ? blocking_ast_cached_lock : NULL;
    memcpy(&cached_lock->lck_key, lockStr, lockLen);

    writer.owner_handle = 0;
    writer.entry_count = 0;
}

GlobalRWLock::~GlobalRWLock()
{
    thread_db* tdbb = JRD_get_thread_data();
    LCK_release(tdbb, cached_lock);
    delete cached_lock;
}

bool GlobalRWLock::lock(thread_db* tdbb, locklevel_t level, SSHORT wait, SLONG owner_handle)
{
    SET_TDBB(tdbb);
    fb_assert(owner_handle);

    { // this is the first scope where the counters are locked
        CountersLockHolder lockHolder(lockMutex);

        COS_TRACE(("lock type=%i, level=%i, readerscount=%i, owner=%i", cached_lock->lck_type, level, readers.getCount(), owner_handle));

        // Check if this is a recursion case
        size_t n;
        if (level == LCK_read) {
            if (readers.find(owner_handle, n)) {
                readers[n].entry_count++;
                return true;
            }
        }
        else {
            if (writer.owner_handle == owner_handle) {
                writer.entry_count++;
                return true;
            }
        }

        bool all_compatible = !writer.entry_count && (level == LCK_read || readers.getCount() == 0);

        // We own the lock and all present requests are compatible with us.
        // In case of any congestion we force all requests through the lock
        // manager to ensure lock ordering.
        if (cached_lock->lck_physical >= level && all_compatible &&
            !internal_blocking && !external_blocking)
        {
            if (level == LCK_read) {
                ObjectOwnerData ownerData;
                ownerData.owner_handle = owner_handle;
                ownerData.entry_count++;
                readers.insert(n, ownerData);
            }
            else
            {
                writer.owner_handle = owner_handle;
                writer.entry_count++;
            }

            return true;
        }

        // We need to release the lock to get the new level lock
        if ( (cached_lock->lck_physical > 0) && (writer.entry_count == 0) && (readers.getCount() == 0) )
        {
            LCK_release(tdbb, cached_lock);
            invalidate(tdbb, false);
            external_blocking = false;
            COS_TRACE(("release our lock to get new level lock, type=%i, level=%i", cached_lock->lck_type, cached_lock->lck_physical));
        }

        internal_blocking++;
    }

    // There is some congestion. Need to use the lock manager.
    // Request a new lock at the new level. Several concurrent lock requests may
    // wait here in the same process in parallel.
    Lock* newLock = FB_NEW_RPT(getPool(), cached_lock->lck_length) Lock;
    newLock->lck_type = cached_lock->lck_type;
    newLock->lck_owner_handle = owner_handle;
    newLock->lck_length = cached_lock->lck_length;

    newLock->lck_dbb = cached_lock->lck_dbb;
    newLock->lck_parent = cached_lock->lck_parent;
    newLock->lck_object = cached_lock->lck_object;
    newLock->lck_ast = cached_lock->lck_ast;
    memcpy(&newLock->lck_key, &cached_lock->lck_key, cached_lock->lck_length);

    COS_TRACE(("request new lock, type=%i, level=%i", cached_lock->lck_type, level));
    if (!LCK_lock(tdbb, newLock, level, wait)) {
        COS_TRACE(("Can't get a lock"));
        delete newLock;
        return false;
    }
    COS_TRACE(("Lock is got, type=%i", cached_lock->lck_type));

    { // this is the second scope where the counters are locked
        CountersLockHolder lockHolder(lockMutex);

        fb_assert(internal_blocking > 0);
        internal_blocking--;

        // Here the old lock is not protecting the shared object. We must refresh its state by fetch.
        if (newLock->lck_physical >= LCK_read) {
            try {
                fetch(tdbb);
            }
            catch(const std::exception&) {
                LCK_release(tdbb, newLock);
                delete newLock;
                return false;
            }
        }

        if (level == LCK_read) {
            ObjectOwnerData ownerData;
            ownerData.entry_count++;
            ownerData.owner_handle = owner_handle;
            readers.add(ownerData);
        }
        else
        {
            writer.owner_handle = owner_handle;
            writer.entry_count++;
        }

        // Replace the cached lock with the new lock if needed
        COS_TRACE(("Replace lock, type=%i", cached_lock->lck_type));
        if (newLock->lck_physical > cached_lock->lck_physical) {
            LCK_release(tdbb, cached_lock);
            delete cached_lock;
            cached_lock = newLock;
            if (!LCK_set_owner_handle(tdbb, cached_lock, LCK_get_owner_handle_by_type(tdbb, physicalLockOwner))) {
                COS_TRACE(("Error: set owner handle for captured lock, type=%i", cached_lock->lck_type));
                LCK_release(tdbb, cached_lock);
                return false;
            }
        }
        else {
            LCK_release(tdbb, newLock);
            delete newLock;
        }
    }

    return true;
}

// NOTE: the unlock method must be signal safe
// This function may be called in an AST. The function doesn't wait.
void GlobalRWLock::unlock(thread_db* tdbb, locklevel_t level, SLONG owner_handle)
{
    SET_TDBB(tdbb);

    CountersLockHolder lockHolder(lockMutex);

    COS_TRACE(("unlock level=%i", level));

    // Check if this is a recursion case
    if (level == LCK_read) {
        size_t n;
        if (!readers.find(owner_handle, n)) {
            ERR_bugcheck_msg("Attempt to call GlobalRWLock::unlock() while not holding a valid lock for logical owner");
        }
        fb_assert(readers[n].entry_count > 0);
        readers[n].entry_count--;
        if (readers[n].entry_count == 0)
            readers.remove(n);
    }
    else {
        fb_assert(writer.owner_handle == owner_handle);
        fb_assert(writer.entry_count == 1);
        fb_assert(cached_lock->lck_physical == LCK_write);

        writer.entry_count = 0;
        writer.owner_handle = 0;

        // Optimize the non-contention case - downgrade to PR and re-use the lock
        if (!internal_blocking && !external_blocking && lockCaching)
        {
            if (!LCK_convert(tdbb, cached_lock, LCK_read, 0))
                ERR_bugcheck_msg("LCK_convert call failed in GlobalRWLock::unlock()");
            return;
        }
    }

    if ( (readers.getCount() == 0) && (writer.entry_count == 0) ) {
        COS_TRACE(("check for release a lock, type=%i", cached_lock->lck_type));
        if (internal_blocking || !lockCaching) {
            LCK_release(tdbb, cached_lock);
            invalidate(tdbb, false);
            external_blocking = false;
        }
        else if (external_blocking) {
            LCK_downgrade(tdbb, cached_lock);
            if (cached_lock->lck_physical < LCK_read)
                invalidate(tdbb, false);
            external_blocking = false;
        }
    }

    COS_TRACE(("unlock type=%i, level=%i, readerscount=%i, owner=%i", cached_lock->lck_type, level, readers.getCount(), owner_handle));
}

void GlobalRWLock::blockingAstHandler(thread_db* tdbb)
{
    SET_TDBB(tdbb);

    CountersLockHolder lockHolder(lockMutex);

    COS_TRACE_AST("blockingAstHandler");

    // When we request a new lock, the counters are not updated until we get it.
    // As such, we need to check the internal_blocking flag that is set during such a situation.
    if ( !internal_blocking && (readers.getCount() == 0) && (writer.entry_count == 0) ) {
        COS_TRACE_AST("downgrade");
        LCK_downgrade(tdbb, cached_lock);
        if (cached_lock->lck_physical < LCK_read) {
            invalidate(tdbb, true);
            external_blocking = false;
        }
    }
    else
        external_blocking = true;
}

void GlobalRWLock::setLockData(SLONG lck_data)
{
    LCK_write_data(cached_lock, lck_data);
}

void GlobalRWLock::changeLockOwner(thread_db* tdbb, locklevel_t level, SLONG old_owner_handle, SLONG new_owner_handle)
{
    SET_TDBB(tdbb);

    if (old_owner_handle == new_owner_handle)
        return;

    CountersLockHolder lockHolder(lockMutex);

    if (level == LCK_read) {
        size_t n;
        if (readers.find(old_owner_handle, n)) {
            fb_assert(readers[n].entry_count > 0);
            readers[n].entry_count--;
            if (readers[n].entry_count == 0)
                readers.remove(n);

            if (readers.find(new_owner_handle, n))
                readers[n].entry_count++;
            else {
                ObjectOwnerData ownerData;
                ownerData.entry_count++;
                ownerData.owner_handle = new_owner_handle;
                readers.insert(n, ownerData);
            }
        }
        else {
            ERR_bugcheck_msg("Attempt to perform GlobalRWLock::change_lock_owner() while not holding a valid lock for logical owner");
        }
    }
    else {
        fb_assert(writer.entry_count == 1);
        writer.owner_handle = new_owner_handle;
    }
}

bool GlobalRWLock::tryReleaseLock(thread_db* tdbb)
{
    CountersLockHolder lockHolder(lockMutex);
    if (!writer.entry_count && !readers.getCount())
    {
        LCK_release(tdbb, cached_lock);
        invalidate(tdbb, false);
        return true;
    }
    return false;
}

} // namespace Jrd
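Editor's note: the sketch below is not part of the commit. It is a minimal, self-contained model (standard C++, with invented names such as CountedRWState and tryFastLock) of the bookkeeping that GlobalRWLock::lock() and unlock() appear to perform on the non-contention fast path: per-owner reader recursion counters, a single writer slot, and the all_compatible test that decides whether the lock manager can be bypassed. The real class defers to the Firebird lock manager whenever this test fails; the model simply reports that the slow path would be taken.

// Editor's sketch (not part of the commit): fast-path counter bookkeeping only.
#include <algorithm>
#include <cassert>
#include <vector>

struct OwnerEntry {
    long owner_handle;
    unsigned entry_count;
};

class CountedRWState {
public:
    // Returns true when the request can be granted without the lock manager:
    // either it is a recursive acquire by the same owner, or all current
    // holders are compatible with the request (the "all_compatible" test).
    bool tryFastLock(bool forWrite, long owner)
    {
        if (!forWrite) {
            if (OwnerEntry* e = findReader(owner)) {    // reader recursion
                ++e->entry_count;
                return true;
            }
        }
        else if (writer.entry_count && writer.owner_handle == owner) {
            ++writer.entry_count;                       // writer recursion
            return true;
        }

        // No writer, and either a read request or no readers at all
        const bool all_compatible = !writer.entry_count && (!forWrite || readers.empty());
        if (!all_compatible)
            return false;                               // would go to the lock manager

        if (forWrite)
            writer = OwnerEntry{owner, 1};
        else
            readers.push_back(OwnerEntry{owner, 1});
        return true;
    }

    void unlock(bool forWrite, long owner)
    {
        if (forWrite) {
            assert(writer.owner_handle == owner && writer.entry_count > 0);
            if (--writer.entry_count == 0)
                writer.owner_handle = 0;
            return;
        }
        OwnerEntry* e = findReader(owner);
        assert(e && e->entry_count > 0);
        if (--e->entry_count == 0)
            readers.erase(readers.begin() + (e - readers.data()));
    }

private:
    OwnerEntry* findReader(long owner)
    {
        auto it = std::find_if(readers.begin(), readers.end(),
            [owner](const OwnerEntry& e) { return e.owner_handle == owner; });
        return it == readers.end() ? nullptr : &*it;
    }

    std::vector<OwnerEntry> readers;
    OwnerEntry writer = {0, 0};
};

int main()
{
    CountedRWState state;
    assert(state.tryFastLock(false, 1));    // owner 1 takes a read lock
    assert(state.tryFastLock(false, 1));    // recursive read by the same owner
    assert(!state.tryFastLock(true, 2));    // a writer is incompatible with readers
    state.unlock(false, 1);
    state.unlock(false, 1);
    assert(state.tryFastLock(true, 2));     // now the write fast path succeeds
    return 0;
}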
170    src/jrd/GlobalRWLock.h    Normal file
@@ -0,0 +1,170 @@
/*
 *  PROGRAM:     JRD Access Method
 *  MODULE:      GlobalRWLock.h
 *  DESCRIPTION: Cached Object Synchronizer
 *
 *  The contents of this file are subject to the Initial
 *  Developer's Public License Version 1.0 (the "License");
 *  you may not use this file except in compliance with the
 *  License. You may obtain a copy of the License at
 *  http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
 *
 *  Software distributed under the License is distributed AS IS,
 *  WITHOUT WARRANTY OF ANY KIND, either express or implied.
 *  See the License for the specific language governing rights
 *  and limitations under the License.
 *
 *  The Original Code was created by Nickolay Samofatov
 *  for the Firebird Open Source RDBMS project.
 *
 *  Copyright (c) 2006 Nickolay Samofatov
 *  and all contributors signed below.
 *
 *  All Rights Reserved.
 *  Contributor(s): ______________________________________.
 *
 */

#include "../common/classes/alloc.h"
#include "../jrd/jrd.h"
#include "../jrd/lck.h"
#include "../jrd/lck_proto.h"
#include "../include/fb_types.h"
#include "../jrd/isc.h"

//#define COS_DEBUG

#ifdef COS_DEBUG
DEFINE_TRACE_ROUTINE(cos_trace);
#define COS_TRACE(args) cos_trace args
#define COS_TRACE_AST(message) gds__trace(message)
#else
#define COS_TRACE(args) /* nothing */
#define COS_TRACE_AST(message) /* nothing */
#endif


namespace Jrd {

typedef USHORT locktype_t;

struct ObjectOwnerData {
    SLONG owner_handle;
    ULONG entry_count;

    static const SLONG& generate(const void* sender, const ObjectOwnerData& value) {
        return value.owner_handle;
    }

    ObjectOwnerData() {
        owner_handle = 0;
        entry_count = 0;
    }
};

/*
 * Architecture goals for the class
 * - Lock to protect intra-process cached resources with an object-oriented interface:
 *   invalidate()/fetch()
 * - Two lock modes: LCK_read (LCK_PR) and LCK_write (LCK_EX)
 * - Support for lock recursion (multiple acquires of a lock by a given owner)
 * - Flexible execution environment
 *   - Multiple threads
 *   - Multiple processes
 *   - Signals
 * - Locks belong to logical owners (typically of type ATTACHMENT or DATABASE, but
 *   potentially also THREAD and PROCESS)
 * - Logical ownership of a lock may change during the object access lifecycle
 *   (a somewhat special case; happens if a cached resource needs to be passed from
 *   one worker thread to another without releasing the lock)
 *
 * Implementation constraints
 * - Avoid calling the lock manager for synchronization in the non-contention case
 *   (for performance reasons, especially in DLM environments)
 * - All contention is handled via the lock manager to ensure reliable deadlock
 *   detection and to be monitored and debuggable via standard means
 */
class GlobalRWLock : public Firebird::PermanentStorage {
public:
    GlobalRWLock(thread_db* tdbb, MemoryPool& p, locktype_t lckType,
        size_t lockLen, const UCHAR* lockStr,
        lck_owner_t physical_lock_owner = LCK_OWNER_database,
        lck_owner_t default_logical_lock_owner = LCK_OWNER_attachment,
        bool lock_caching = true);

    virtual ~GlobalRWLock();

    // As usual,
    // wait = 0 - try to lock the thing instantly (doesn't send ASTs)
    // wait < 0 - timeout in seconds (doesn't deadlock)
    // wait > 0 - infinite wait (may deadlock)
    //
    // This function returns false if it cannot take the lock
    bool lock(thread_db* tdbb, locklevel_t level, SSHORT wait, SLONG owner_handle);
    bool lock(thread_db* tdbb, locklevel_t level, SSHORT wait) {
        return lock(tdbb, level, wait, LCK_get_owner_handle_by_type(tdbb, defaultLogicalLockOwner));
    }

    // NOTE: the unlock method must be signal safe
    // This function may be called in an AST. The function doesn't wait.
    void unlock(thread_db* tdbb, locklevel_t level, SLONG owner_handle);
    void unlock(thread_db* tdbb, locklevel_t level) {
        unlock(tdbb, level, LCK_get_owner_handle_by_type(tdbb, defaultLogicalLockOwner));
    }

    // Change the lock owner. The function doesn't wait.
    void changeLockOwner(thread_db* tdbb, locklevel_t level, SLONG old_owner_handle, SLONG new_owner_handle);

    SLONG getLockData() {
        return cached_lock->lck_data;
    }
    void setLockData(SLONG lck_data);

    // Release the physical lock if possible. Used to force a refetch.
    // Returns true if the lock was released.
    bool tryReleaseLock(thread_db* tdbb);

protected:
    Lock* cached_lock;

    // Flag to indicate that somebody is waiting via the lock manager.
    // If somebody uses the lock manager, all concurrent requests should also
    // go via the lock manager to prevent starvation.
    int internal_blocking;
    bool external_blocking;    // Unprocessed AST pending

    // Load the object from the shared location.
    virtual void fetch(thread_db* tdbb) {}

    // May be called under AST. Should not throw exceptions.
    virtual void invalidate(thread_db* tdbb, bool ast_handler) {}

    virtual void blockingAstHandler(thread_db* tdbb);

private:
    Firebird::Mutex lockMutex;              // Protects status of logical lock, counters and blocking flags
    lck_owner_t physicalLockOwner;          // Holds the cached lock
    lck_owner_t defaultLogicalLockOwner;    // Requests the new lock to replace the cached one

    // true  - unlock keeps the cached lock; it is released by AST.
    // false - unlock releases the cached lock if possible
    bool lockCaching;

    Firebird::SortedArray<ObjectOwnerData, Firebird::EmptyStorage<ObjectOwnerData>,
        SLONG, ObjectOwnerData, Firebird::DefaultComparator<SLONG> > readers;
    ObjectOwnerData writer;

    // In the current implementation, threads are not used along with signals.
    // Anyway, if we own the mutex only with signals disabled, this code
    // becomes signal-safe even in the presence of threads.
    //
    // SUPERSERVER: We do not call any functions that can cause a wait
    // while under the counters lock, so we do not need to release the thread
    // scheduler here.
    class CountersLockHolder : public AstInhibit, public Firebird::MutexLockGuard
    {
    public:
        CountersLockHolder(Firebird::Mutex& mtx)
            : AstInhibit(), MutexLockGuard(mtx) { }
    };

    static int blocking_ast_cached_lock(void* ast_object);
};

} // namespace Jrd
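Editor's note: the sketch below is likewise not part of the commit. It models, in standard C++, the CountersLockHolder idea documented in the header above: an RAII guard that first inhibits asynchronous interruption and then takes the mutex, so that the counters can be touched safely even when the same object is also reached from an AST/signal context. SignalInhibit, the choice of SIGUSR1, and the use of POSIX pthread_sigmask are illustrative assumptions, not the Firebird AstInhibit/MutexLockGuard API.

// Editor's sketch (not part of the commit): a stand-in for the AstInhibit +
// MutexLockGuard combination. Signals are blocked before the mutex is taken
// and restored only after it is released.
#include <signal.h>
#include <pthread.h>
#include <iostream>
#include <mutex>

// Stand-in for AstInhibit: blocks SIGUSR1 for the lifetime of the object.
class SignalInhibit {
public:
    SignalInhibit()
    {
        sigset_t blocked;
        sigemptyset(&blocked);
        sigaddset(&blocked, SIGUSR1);
        pthread_sigmask(SIG_BLOCK, &blocked, &saved);
    }
    ~SignalInhibit()
    {
        pthread_sigmask(SIG_SETMASK, &saved, nullptr);
    }
private:
    sigset_t saved;
};

// Base-class order matters: SignalInhibit is constructed first and destroyed
// last, mirroring "class CountersLockHolder : public AstInhibit,
// public Firebird::MutexLockGuard" in the header above.
class CountersLockHolder : private SignalInhibit, private std::lock_guard<std::mutex> {
public:
    explicit CountersLockHolder(std::mutex& mtx)
        : SignalInhibit(), std::lock_guard<std::mutex>(mtx) { }
};

static std::mutex counters_mutex;
static int shared_counter = 0;

int main()
{
    {
        CountersLockHolder guard(counters_mutex);   // signals blocked, mutex held
        ++shared_counter;                           // counters may be touched safely here
    }   // mutex released first, then the previous signal mask is restored
    std::cout << "counter = " << shared_counter << std::endl;
    return 0;
}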