February 2020 Smoke-Indica.com updates


Updates to smoke-indica.com

Data acquisition

Previously I was fetching my last 100 resmoked posts using smoke-js; however, in early January this stopped working, so I've now switched to extracting posts from every block manually.

I chose to run my own smoked client rather than flood the public API server with 14 million requests.

I threw together an Ubuntu 16.04 VM with 20 GB RAM and a 100 GB HDD, allocating an extra 10 GB as a swapfile so that I could set the shared file size to 30 GB in the witness config.ini.

I had to set plugin=database-api for the database-api queries from smoke-js to work.
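
For reference, the relevant config.ini lines look roughly like this (a sketch only: shared-file-size and webserver-ws-endpoint follow the option names smoked inherits from steemd, and the endpoint is assumed to match the ws://127.0.0.1:8090 URL the scripts below connect to):

# config.ini excerpt (option names assumed from steemd conventions)
shared-file-size = 30G
plugin = database-api
webserver-ws-endpoint = 127.0.0.1:8090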

I had originally compiled it from source; however, that client suffered log spam from darian's DoS. I downloaded the latest smoked release instead, and it ran without any error-message spam in the terminal.

The script takes about 8 hours to iterate over more than 14 million blocks, but the outcome is well worth it!

#!/usr/bin/env node
// Scan the chain block by block, extracting top-level posts made by
// the followed authors.
let smoke = require('@smokenetwork/smoke-js');
let fs = require('fs');

smoke.api.setOptions({ url: 'ws://127.0.0.1:8090' });

const followedAuthors = ["indica"];

const endBlock = 14500000;

// Fetch one block; resolve with the block if it contains transactions,
// otherwise resolve null so the caller can skip it. Errors are logged
// and treated as empty blocks rather than leaving the promise unsettled.
function querySmoke (i) {
  return new Promise((resolve) => {
    smoke.api.getBlock(i, function (err, result) {
      if (err) {
        console.warn(`${i}: ${err}`);
        return resolve(null);
      }
      if (result && result.transactions.length) {
        return resolve(result);
      }
      return resolve(null);
    });
  });
}

let comments = [];
let blockComments = {};

function commentsFromBlocks (i) {
  querySmoke(i)
    .then(result => {
      if (result) {
        result.transactions.forEach((tx) => {
          // Keep only top-level "comment" operations (posts, not replies)
          // authored by a followed account.
          const filteredOPs = tx.operations
            .filter(eachOP => eachOP[0] === "comment")
            .filter(op => op[1].parent_author === "" &&
                          op[1].title.length &&
                          followedAuthors.includes(op[1].author));

          if (filteredOPs.length) {
            comments = comments.concat(filteredOPs);
          }
        });
      }
    })
    .then(() => {
      if (i < endBlock) {
        // Recurse via the promise chain; the stack unwinds between blocks.
        commentsFromBlocks(i + 1);
      } else {
        if (comments.length) {
          console.log(`posts: ${comments.length}`);
          // Collapse the operations into {author: [permlink, ...]},
          // skipping duplicate permlinks (post edits re-broadcast the
          // same comment operation).
          comments.forEach((comment) => {
            const thisOP = comment[1];
            if (thisOP.author in blockComments) {
              if (!blockComments[thisOP.author].includes(thisOP.permlink)) {
                blockComments[thisOP.author].push(thisOP.permlink);
              }
            } else {
              blockComments[thisOP.author] = [thisOP.permlink];
            }
          });
        } else {
          console.log('Nothing to save to disk.');
        }

        fs.writeFile('followerPosts.json', JSON.stringify(blockComments), 'utf8', function (err) {
          if (err) throw err;
        });
      }
    })
    .catch(error => {
      console.log(`ERROR: (${i}) ${error}`);
    });
}

commentsFromBlocks(14409831);

The above results in a JSON file on disk with the following structure:

{ "author_name": ["perm_link", ...] }
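
For example, with the single followed author above, the file might contain (hypothetical permlinks, for illustration only):

{ "indica": ["my-first-post", "another-post"] }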

With this new JSON data containing every post by our followed authors, we can request the post contents:

let smoke = require('@smokenetwork/smoke-js');
let fs = require('fs');

smoke.api.setOptions({ url: 'ws://127.0.0.1:8090' });

let extractedPosts = [];

fs.readFile('followerPosts.json', function readFileCallback(err, data) {
  if (err) {
    console.log(err);
    return;
  }

  const jsonData = JSON.parse(data);

  // Count every pending getContent call up front; responses can return
  // out of order, so we write the output once the last one completes
  // rather than relying on loop indices.
  let remaining = Object.values(jsonData)
    .reduce((sum, permlinks) => sum + permlinks.length, 0);

  Object.keys(jsonData).forEach(function (author) {
    jsonData[author].forEach((permlink) => {
      smoke.api.getContent(author, permlink, function (error, result) {
        if (result) {
          extractedPosts.push(result);
        }
        if (error) {
          console.log(`getContentError: ${error}`);
        }

        remaining -= 1;
        if (remaining === 0) {
          console.log("complete");
          fs.writeFile('followerContent.json', JSON.stringify(extractedPosts), 'utf8', function (err) {
            if (err) throw err;
          });
        }
      });
    });
  });
});

Now we need to make the markdown files for importing to Gatsby:

let fs = require('fs');
var sanitizeHtml = require('sanitize-html');

fs.readFile('followerContent.json', function readFileCallback(err, data) {
  if (err) {
    console.log(err);
    return;
  }

  const jsonData = JSON.parse(data);
  jsonData.forEach((post) => {
    // Only keep posts which earned more than 100 SMOKE.
    if (parseFloat(post.total_payout_value.split(" ")[0]) > 100) {
      // json_metadata can be empty or malformed, so parse defensively.
      let metadata = {};
      try {
        metadata = JSON.parse(post.json_metadata);
      } catch (e) {
        console.log(`bad json_metadata: ${post.permlink}`);
      }

      // Build the Gatsby frontmatter; quotes are stripped from the title
      // (and the first colon replaced) so the single-quoted YAML stays
      // parseable.
      let output = `---\n`;
      output += `title: '${post.title.replace('4:20', '420').replace(':', ' ').replace(/'/g, '').replace(/"/g, '')}'\n`;
      output += `tags:${metadata.tags ? metadata.tags.map(tag => `\n  - '${tag.toString()}'`).join('') : ' ' + post.category.toString()}\n`;
      output += `images:${metadata.image ? metadata.image.map(img => `\n  - >-\n    ${img.replace(/\n/g, '')}`).join('') : ' []'}\n`;
      output += `author: ${post.author}\n`;
      output += `net_votes: ${post.net_votes}\n`;
      output += `template: "post"\n`;
      output += `description: ""\n`;
      output += `draft: false\n`;
      output += `category: ""\n`;
      output += `total_payout_value: '${post.total_payout_value}'\n`;
      output += `pending_payout_value: '${post.pending_payout_value}'\n`;
      output += `permlink: ${post.permlink}\n`;
      output += `date: ${post.created}\n`;
      output += `---\n\n`;

      // allowedTags/allowedAttributes set to false lets everything
      // through; sanitize-html still normalises the markup.
      const body = sanitizeHtml(post.body, { allowedTags: false, allowedAttributes: false });
      output += body.replace(/: /g, ' ');

      fs.writeFile(`${post.permlink}.md`, output, 'utf8', function (err) {
        if (err) throw err;
      });
    }
  });
});
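
Each generated file then looks something like this (illustrative values only; the title, tags, image URL, payout figures, and date here are hypothetical):

---
title: 'A hypothetical post title'
tags:
  - 'cannabis'
  - 'smoke'
images:
  - >-
    https://example.com/header.jpg
author: indica
net_votes: 42
template: "post"
description: ""
draft: false
category: ""
total_payout_value: '123.456 SMOKE'
pending_payout_value: '0.000 SMOKE'
permlink: a-hypothetical-post-title
date: 2020-01-15T12:00:00
---

The sanitized post body follows the frontmatter.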

How much more content has been added to smoke-indica.com?

Most users I'm currently following should have an author page now, and any post which earned more than 100 SMOKE should be listed on their author page. Previously only reblogged content was listed; however, smoke-js does not populate the reblog (resmoke) array (the same is true of steem-js).

Before, there were approximately 3k pages; now there are over 12k!

Since there is much more indexed content, the author and tag leaderboards have changed.

What else has changed?

Scrolling animations are now implemented throughout the website; as you scroll, the next items slide into view.

This has been applied to feed items on the index and tag pages, to the leaderboard text, and to the 'similar content' sections below articles and on the author pages.
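
As a rough illustration, one common way to achieve this effect in the browser (not necessarily how smoke-indica.com implements it) is an IntersectionObserver that adds an animation class when an item scrolls into view; the 'feed-item' and 'slide-in' class names here are hypothetical:

// Hypothetical class names: assumes a CSS rule animates ".slide-in".
const observer = new IntersectionObserver((entries) => {
  entries.forEach((entry) => {
    if (entry.isIntersecting) {
      entry.target.classList.add('slide-in'); // trigger the CSS animation
      observer.unobserve(entry.target);       // animate each item only once
    }
  });
}, { threshold: 0.1 });

document.querySelectorAll('.feed-item').forEach((el) => observer.observe(el));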

Try it out, go to smoke-indica.com and scroll 👍

Thoughts?

What do you think so far? Have any requests for new features, views, elements?

Do you think the tag pages should be more compact, similar to the author page, and should there be more items per page (up from 25 to, say, 100)?




You made it more attractive: after the last update your website became easier to navigate by tag usage, trending tags, or authors (rewarded & featured), while the compact view on the main page is really amazing.

Overall it looks perfect, better than in the past, and no changes are needed. The only thing that could be added is an extra stat showing visitors the most visited tags, posts, etc. in real time (if possible)!

·

Added another tab, 'optimized'; it renders only 10 items rather than all 42 like the other two.

One problem is that you can't search for text in items not yet rendered on the optimized page, so it might not be great for author pages.

·
·

I checked the optimized tab just now. Good one, but my favorite is still the compact one.

·
·
·

Changed it further: the optimized tab has replaced the normal & compact tabs, and the performance improvement on mobile is significant.

·
·
·
·

Well done! The performance improvement is significant, the optimized tab looks better than the other two, and it is easier for visitors to scroll than to click next. Thank you.

·
·
·
·

The green looks awesome, and using 4 tabs ("New - Most rewards - Most votes - Shuffled") will increase traffic, as it gives visitors more ways to search.

Thanks for the continued development and the hard work promoting our posts. I am sure your website will spread the word in no time and attract new users to our beloved platform, smoke.io.

💪 I made the top 10! ... the first thing I noticed 😁

Site looks awesome, the quick little animations are perfect! The only negative I have is that my profile image doesn't load LOL

So... since you are running a node for your site, does that mean you're going to join us in block production? I recently spun up the second witness again as a full API node, which I am keeping private as a backup just in case 😉 I need to figure out my changing IP address before I do anything else with it 😅

·

So... since you are running a node for your site does that mean your going to join us in block production?

I might run a seed node, but I'm not running a witness node for now. This was more for large-scale querying.

·
·

Good to know who has a potential backup system 🙌

·

Your avatar URL, when cached with the image cache, returns the following error: {"status":"error","code":404,"message":"The requested URL returned error: 403"}

·
·

Ahh, a Discord image error I’d bet.... I’ll have to update it 😉

It gives totally different insights than the ones before. There is no clutter of information.... I would say it's a clean cut once more from the last one.

Oh, I checked my account... the title image is not showing in each post, lol....

Awesome work as always... I have to say, the added features are really amazing...

I like it like its right now! I wouldn't change anything 😊

·

Cool, I might add extra stats pages then, since they're out of the way.

·

Super Clean!

It's pretty @Grooting just the way it is! :D

Nice interface, super liked

·

Thanks 👍

I think it looks great. Go with 42 ....

·

I second that notion 👌

·

42 tag posts?

·
·

Yes ....

Do you think the tag pages should be more compact similar to the author page, and for there to be more of them ....

42 per page

·
·
·

cool

·
·
·

Increased to 2k per page, with the optimized page format 👍

·
·
·
·

Excellent. That’s a lot more...

·
·
·
·
·
·

👍

·
·

Thanks for the feedback 👍