Programmatically isolate and moderate hate speech + toxic content — for free.
We implement state-of-the-art machine learning models, trained on hundreds of thousands of comments, to accurately filter out bad content.
We empower blog, forum, and other community moderators and owners by automatically flagging potentially offensive content for review, reducing moderator workload, and improving community safety.
curl -X POST "https://api.moderatehatespeech.com/api/v1/toxic/" \
-d '{"token":"YOUR_API_TOKEN_HERE","text":"TEXT_CONTENT_HERE"}'
<?php
/**
 * Checks a newly inserted comment against the ModerateHatespeech toxicity API
 * and detects flagged comments with a confidence above $CONF_THRESHOLD.
 *
 * Hooked to `wp_insert_comment`, so it runs once per submitted comment.
 * Fails open: if the API is unreachable or returns an unexpected payload,
 * the comment is left untouched.
 *
 * @author ModerateHatespeech.com
 * @since 1.0
 * @param int        $id      ID of the inserted comment.
 * @param WP_Comment $comment WP_Comment object of the inserted comment.
 * @return void
 */
function hs_check_comment($id, $comment) {
    $CONF_THRESHOLD = 0.9; // The minimum confidence threshold to take action. Values between 0.5 and 1.

    $args = array(
        'method'  => 'POST',
        'timeout' => 10,
        'headers' => array(
            'Content-Type' => 'application/json',
        ),
        'body'    => json_encode(
            array(
                "token" => "YOUR_API_TOKEN_HERE",
                "text"  => $comment->comment_content,
            )
        ),
    );

    $request = wp_remote_post("https://api.moderatehatespeech.com/api/v1/toxic/", $args);

    // Bail out on transport errors or non-200 responses (fail open).
    if (is_wp_error($request) || wp_remote_retrieve_response_code($request) !== 200) {
        return;
    }

    $response = json_decode(wp_remote_retrieve_body($request), true);

    // json_decode() returns null on malformed JSON; guard before reading keys.
    if (!is_array($response) || !isset($response["class"], $response["confidence"])) {
        return;
    }

    // Strict comparison and an explicit numeric cast: the API may return the
    // confidence as a string.
    if ($response["class"] === "flag" && (float) $response["confidence"] > $CONF_THRESHOLD) {
        // do something
        // Eg, wp_delete_comment($comment->comment_ID);
    }
}
add_action('wp_insert_comment', 'hs_check_comment', 10, 2);
<?php
/**
 * Checks a piece of text against the ModerateHatespeech toxicity API and
 * reports whether it was flagged with a confidence above $CONF_THRESHOLD.
 *
 * Fails open: network errors, timeouts, or malformed responses return false.
 *
 * @author ModerateHatespeech.com
 * @since 1.0
 * @param string $comment Text to check.
 * @return bool True when the API flags the text above the threshold, false otherwise.
 */
function hs_check_comment($comment) {
    $CONF_THRESHOLD = 0.9; // The minimum confidence threshold to take action. Values between 0.5 and 1.

    $payload = json_encode(
        array(
            "token" => "YOUR_API_TOKEN_HERE",
            "text"  => $comment,
        )
    );

    $ch = curl_init("https://api.moderatehatespeech.com/api/v1/toxic/");
    curl_setopt( $ch, CURLOPT_POSTFIELDS, $payload );
    curl_setopt( $ch, CURLOPT_HTTPHEADER, array('Content-Type: application/json'));
    curl_setopt( $ch, CURLOPT_RETURNTRANSFER, true );
    curl_setopt( $ch, CURLOPT_TIMEOUT, 10 ); // Don't let a slow API hang the caller indefinitely.

    $result = curl_exec($ch);
    curl_close($ch);

    // curl_exec() returns false on failure; never feed that to json_decode().
    if ($result === false) {
        return false;
    }

    $response = json_decode($result, true);

    // json_decode() returns null on malformed JSON; guard before reading keys.
    if (!is_array($response) || !isset($response["class"], $response["confidence"])) {
        return false;
    }

    // Strict comparison and an explicit numeric cast: the API may return the
    // confidence as a string.
    if ($response["class"] === "flag" && (float) $response["confidence"] > $CONF_THRESHOLD) {
        // do something
        // Eg, log comment
        return true;
    }
    return false;
}
import requests
def hs_check_comment(comment):
    """Check text against the ModerateHatespeech toxicity API.

    Fails open: network errors, timeouts, or malformed responses
    return False rather than raising.

    Args:
        comment: The text to check.

    Returns:
        True if the API flags the text with confidence above
        CONF_THRESHOLD, False otherwise.
    """
    CONF_THRESHOLD = 0.9  # Minimum confidence to take action; values between 0.5 and 1.
    data = {
        "token": "YOUR_API_TOKEN_HERE",
        "text": comment
    }
    try:
        # Bounded timeout so a slow API cannot hang the caller indefinitely.
        response = requests.post(
            "https://api.moderatehatespeech.com/api/v1/toxic/",
            json=data,
            timeout=10,
        ).json()
    except (requests.RequestException, ValueError):
        # Connection/timeout failures or a non-JSON body: treat as not flagged.
        return False
    # .get() guards against responses missing the expected keys.
    if response.get("class") == "flag" and float(response.get("confidence", 0)) > CONF_THRESHOLD:
        # Do something
        return True
    return False