<?php
+use andreskrey\Readability\Readability;
+use andreskrey\Readability\Configuration;
+use andreskrey\Readability\ParseException;
+
class Af_RedditImgur extends Plugin {
+
+ /** @var PluginHost $host */
private $host;
function about() {
}
function save() {
- $enable_readability = checkbox_to_sql_bool($_POST["enable_readability"]) == "true";
- $enable_content_dupcheck = checkbox_to_sql_bool($_POST["enable_content_dupcheck"]) == "true";
+ $enable_readability = checkbox_to_sql_bool($_POST["enable_readability"]);
+ $enable_content_dupcheck = checkbox_to_sql_bool($_POST["enable_content_dupcheck"]);
$this->host->set($this, "enable_readability", $enable_readability, false);
$this->host->set($this, "enable_content_dupcheck", $enable_content_dupcheck);
$img_entries = $xpath->query("(//img[@src])");
$found = false;
+ //$debug = 1; // debug toggle: uncomment to force verbose _debug() output below
foreach ($entries as $entry) {
- if ($entry->hasAttribute("href")) {
+ if ($entry->hasAttribute("href") && strpos($entry->getAttribute("href"), "reddit.com") === FALSE) {
_debug("processing href: " . $entry->getAttribute("href"), $debug);
$matches = array();
- if (preg_match("/^https?:\/\/twitter.com\/(.*?)\/status\/(.*)/", $entry->getAttribute("href"), $matches)) {
+ if (!$found && preg_match("/^https?:\/\/twitter.com\/(.*?)\/status\/(.*)/", $entry->getAttribute("href"), $matches)) {
_debug("handling as twitter: " . $matches[1] . " " . $matches[2], $debug);
$oembed_result = fetch_file_contents("https://publish.twitter.com/oembed?url=" . urlencode($entry->getAttribute("href")));
$poster_url = false;
}
- // IMPORTANT: This assumes the article GUID is kept in "owner_uid,entry_guid" format, and
- // Reddit feed entries will keep the <id> element (so get_id returns it).
- $feeditem_id = explode(",", $article["guid"])[1];
+ // Get original article URL from v.redd.it redirects
+ $source_article_url = $this->get_location($matches[0]);
+ _debug("Resolved ".$matches[0]." to ".$source_article_url, $debug);
+
$source_stream = false;
- $j = json_decode(fetch_file_contents($article["link"].".json"), true);
-
- if ($j) {
- foreach ($j as $listing) {
- foreach ($listing["data"]["children"] as $child) {
- // Found the child object corresponding to the article (e.g. same name+ID like "t3_70j63a").
- if ($child["data"]["name"] == $feeditem_id) {
- try {
- $source_stream = $child["data"]["media"]["reddit_video"]["fallback_url"];
- }
- catch (Exception $e) {
+
+ if ($source_article_url) {
+ $j = json_decode(fetch_file_contents($source_article_url.".json"), true);
+
+ if ($j) {
+ foreach ($j as $listing) {
+ foreach ($listing["data"]["children"] as $child) {
+ if ($child["data"]["url"] == $matches[0]) {
+ try {
+ $source_stream = $child["data"]["media"]["reddit_video"]["fallback_url"];
+ }
+ catch (Exception $e) {
+ }
+ break 2;
}
- break 2;
}
}
}
// linked albums & pages
- if (!$found && preg_match("/^https?:\/\/(m\.)?imgur.com\/([^\.\/]+$)/", $entry->getAttribute("href"), $matches) ||
+ /*if (!$found && preg_match("/^https?:\/\/(m\.)?imgur.com\/([^\.\/]+$)/", $entry->getAttribute("href"), $matches) ||
preg_match("/^https?:\/\/(m\.)?imgur.com\/(a|album|gallery)\/[^\.]+$/", $entry->getAttribute("href"), $matches)) {
_debug("Handling as an imgur page/album/gallery", $debug);
if ($debug) print_r($urls);
}
}
- }
+ } */
// wtf is this even
if (!$found && preg_match("/^https?:\/\/gyazo\.com\/([^\.\/]+$)/", $entry->getAttribute("href"), $matches)) {
$found = true;
}
+
+ // nothing matched so far: fall back to the linked page's og:image meta property
+ if (!$found) {
+ _debug("looking for meta og:image", $debug);
+
+ $content = fetch_file_contents(["url" => $entry->getAttribute("href"),
+ "http_accept" => "text/*"]);
+
+ if ($content) {
+ $cdoc = new DOMDocument();
+
+ if (@$cdoc->loadHTML($content)) {
+ $cxpath = new DOMXPath($cdoc);
+
+ $og_image = $cxpath->query("//meta[@property='og:image']")->item(0);
+
+ if ($og_image) {
+
+ $og_src = $og_image->getAttribute("content");
+
+ if ($og_src) {
+ $img = $doc->createElement('img');
+ $img->setAttribute("src", $og_src);
+
+ $br = $doc->createElement('br');
+ $entry->parentNode->insertBefore($img, $entry);
+ $entry->parentNode->insertBefore($br, $entry);
+
+ $found = true;
+ }
+ }
+ }
+ }
+ }
+
}
// remove tiny thumbnails
if ($this->host->get($this, "enable_content_dupcheck")) {
if ($content_link) {
- $content_href = db_escape_string($content_link->getAttribute("href"));
- $entry_guid = db_escape_string($article["guid_hashed"]);
+ $content_href = $content_link->getAttribute("href");
+ $entry_guid = $article["guid_hashed"];
$owner_uid = $article["owner_uid"];
if (DB_TYPE == "pgsql") {
$interval_qpart = "date_entered < DATE_SUB(NOW(), INTERVAL 1 DAY)";
}
- $result = db_query("SELECT COUNT(id) AS cid
+ $sth = $this->pdo->prepare("SELECT COUNT(id) AS cid
FROM ttrss_entries, ttrss_user_entries WHERE
ref_id = id AND
$interval_qpart AND
- guid != '$entry_guid' AND
- owner_uid = '$owner_uid' AND
- content LIKE '%href=\"$content_href\">[link]%'");
+ guid != ? AND
+ owner_uid = ? AND
+ content LIKE ?");
- if ($result) {
- $num_found = db_fetch_result($result, 0, "cid");
+ $sth->execute([$entry_guid, $owner_uid, "%href=\"$content_href\">[link]%"]);
+
+ if ($row = $sth->fetch()) {
+ $num_found = $row['cid'];
if ($num_found > 0) $article["force_catchup"] = true;
}
}
}
- private function get_content_type($url, $useragent = SELF_USER_AGENT) {
- $content_type = false;
+ private function get_header($url, $header, $useragent = SELF_USER_AGENT) {
+ $ret = false;
if (function_exists("curl_init") && !defined("NO_CURL")) {
$ch = curl_init($url);
curl_setopt($ch, CURLOPT_USERAGENT, $useragent);
@curl_exec($ch);
- $content_type = curl_getinfo($ch, CURLINFO_CONTENT_TYPE);
+ $ret = curl_getinfo($ch, $header);
}
- return $content_type;
+ return $ret;
+ }
+
+ private function get_content_type($url, $useragent = SELF_USER_AGENT) {
+ return $this->get_header($url, CURLINFO_CONTENT_TYPE, $useragent);
+ }
+
+ private function get_location($url, $useragent = SELF_USER_AGENT) {
+ return $this->get_header($url, CURLINFO_EFFECTIVE_URL, $useragent);
}
/**
if (!class_exists("Readability")) require_once(dirname(dirname(__DIR__)). "/lib/readability/Readability.php");
- if ($url &&
- strpos($url, "twitter.com") === FALSE &&
- strpos($url, "youtube.com") === FALSE &&
- strpos($url, "reddit.com") === FALSE) {
+ // do not try to embed posts linking back to other reddit posts
+ if ($url && strpos($url, "reddit.com") === FALSE) {
/* link may lead to a huge video file or whatever, we need to check content type before trying to
parse it which p much requires curl */
if ($content_type && strpos($content_type, "text/html") !== FALSE) {
- $tmp = fetch_file_contents(array("url" => $url,
- "useragent" => $useragent_compat));
+ $tmp = fetch_file_contents(["url" => $url,
+ "useragent" => $useragent_compat,
+ "http_accept" => "text/html"]);
if ($debug) _debug("tmplen: " . mb_strlen($tmp));
if ($tmp && mb_strlen($tmp) < 1024 * 500) {
- $r = new Readability($tmp, $url);
+ $r = new Readability(new Configuration());
- if ($r->init()) {
+ try {
+ if ($r->parse($tmp)) {
- $tmpxpath = new DOMXPath($r->dom);
+ $tmpxpath = new DOMXPath($r->getDOMDocument());
- $entries = $tmpxpath->query('(//a[@href]|//img[@src])');
+ $entries = $tmpxpath->query('(//a[@href]|//img[@src])');
- foreach ($entries as $entry) {
- if ($entry->hasAttribute("href")) {
- $entry->setAttribute("href",
- rewrite_relative_url($url, $entry->getAttribute("href")));
+ foreach ($entries as $entry) {
+ if ($entry->hasAttribute("href")) {
+ $entry->setAttribute("href",
+ rewrite_relative_url($url, $entry->getAttribute("href")));
- }
+ }
+
+ if ($entry->hasAttribute("src")) {
+ $entry->setAttribute("src",
+ rewrite_relative_url($url, $entry->getAttribute("src")));
- if ($entry->hasAttribute("src")) {
- $entry->setAttribute("src",
- rewrite_relative_url($url, $entry->getAttribute("src")));
+ }
}
+ $article["content"] = $r->getContent() . "<hr/>" . $article["content"];
}
-
- $article["content"] = $r->articleContent->innerHTML . "<hr/>" . $article["content"];
+ } catch (ParseException $e) {
+ //
}
}
}