mirror of
https://github.com/movie-web/providers.git
synced 2025-09-13 17:53:24 +00:00
Add dropload, filelions, and vtube embed scrapers
This commit is contained in:
@@ -1,7 +1,9 @@
|
||||
import { Embed, Sourcerer } from '@/providers/base';
|
||||
import { doodScraper } from '@/providers/embeds/dood';
|
||||
import { droploadScraper } from '@/providers/embeds/dropload';
|
||||
import { febboxHlsScraper } from '@/providers/embeds/febbox/hls';
|
||||
import { febboxMp4Scraper } from '@/providers/embeds/febbox/mp4';
|
||||
import { filelionsScraper } from '@/providers/embeds/filelions';
|
||||
import { mixdropScraper } from '@/providers/embeds/mixdrop';
|
||||
import { mp4uploadScraper } from '@/providers/embeds/mp4upload';
|
||||
import { streambucketScraper } from '@/providers/embeds/streambucket';
|
||||
@@ -9,6 +11,7 @@ import { streamsbScraper } from '@/providers/embeds/streamsb';
|
||||
import { upcloudScraper } from '@/providers/embeds/upcloud';
|
||||
import { upstreamScraper } from '@/providers/embeds/upstream';
|
||||
import { vidsrcembedScraper } from '@/providers/embeds/vidsrc';
|
||||
import { vTubeScraper } from '@/providers/embeds/vtube';
|
||||
import { flixhqScraper } from '@/providers/sources/flixhq/index';
|
||||
import { goMoviesScraper } from '@/providers/sources/gomovies/index';
|
||||
import { kissAsianScraper } from '@/providers/sources/kissasian/index';
|
||||
@@ -82,5 +85,8 @@ export function gatherAllEmbeds(): Array<Embed> {
|
||||
streamvidScraper,
|
||||
voeScraper,
|
||||
streamtapeScraper,
|
||||
droploadScraper,
|
||||
filelionsScraper,
|
||||
vTubeScraper,
|
||||
];
|
||||
}
|
||||
|
56
src/providers/embeds/dropload.ts
Normal file
56
src/providers/embeds/dropload.ts
Normal file
@@ -0,0 +1,56 @@
|
||||
import { unpack } from 'unpacker';
|
||||
|
||||
import { flags } from '@/entrypoint/utils/targets';
|
||||
|
||||
import { makeEmbed } from '../base';
|
||||
|
||||
const evalCodeRegex = /eval\((.*)\)/g;
|
||||
const fileRegex = /file:"(.*?)"/g;
|
||||
const tracksRegex = /\{file:"([^"]+)",kind:"thumbnails"\}/g;
|
||||
|
||||
export const droploadScraper = makeEmbed({
|
||||
id: 'dropload',
|
||||
name: 'Dropload',
|
||||
rank: 120,
|
||||
scrape: async (ctx) => {
|
||||
const mainPageRes = await ctx.fetcher.full<string>(ctx.url, {
|
||||
headers: {
|
||||
referer: ctx.url,
|
||||
},
|
||||
});
|
||||
const mainPageUrl = new URL(mainPageRes.finalUrl);
|
||||
const mainPage = mainPageRes.body;
|
||||
|
||||
const evalCode = mainPage.match(evalCodeRegex);
|
||||
if (!evalCode) throw new Error('Failed to find eval code');
|
||||
const unpacked = unpack(evalCode[1]);
|
||||
|
||||
const file = fileRegex.exec(unpacked);
|
||||
const thumbnailTrack = tracksRegex.exec(unpacked);
|
||||
if (!file?.[1]) throw new Error('Failed to find file');
|
||||
|
||||
return {
|
||||
stream: [
|
||||
{
|
||||
id: 'primary',
|
||||
type: 'hls',
|
||||
playlist: file[1],
|
||||
flags: [flags.IP_LOCKED, flags.CORS_ALLOWED],
|
||||
captions: [],
|
||||
preferredHeaders: {
|
||||
Referer: mainPageUrl.origin,
|
||||
origin: mainPageUrl.origin,
|
||||
},
|
||||
...(thumbnailTrack
|
||||
? {
|
||||
thumbnailTrack: {
|
||||
type: 'vtt',
|
||||
url: mainPageUrl.origin + thumbnailTrack[1],
|
||||
},
|
||||
}
|
||||
: {}),
|
||||
},
|
||||
],
|
||||
};
|
||||
},
|
||||
});
|
51
src/providers/embeds/filelions.ts
Normal file
51
src/providers/embeds/filelions.ts
Normal file
@@ -0,0 +1,51 @@
|
||||
import { flags } from '@/entrypoint/utils/targets';
|
||||
import { makeEmbed } from '@/providers/base';
|
||||
|
||||
const linkRegex = /file: ?"(http.*?)"/;
|
||||
// the white space charecters may seem useless, but without them it breaks
|
||||
const tracksRegex = /\{file:\s"([^"]+)",\skind:\s"thumbnails"\}/g;
|
||||
|
||||
export const filelionsScraper = makeEmbed({
|
||||
id: 'filelions',
|
||||
name: 'filelions',
|
||||
rank: 115,
|
||||
async scrape(ctx) {
|
||||
const mainPageRes = await ctx.fetcher.full<string>(ctx.url, {
|
||||
headers: {
|
||||
referer: ctx.url,
|
||||
},
|
||||
});
|
||||
const mainPage = mainPageRes.body;
|
||||
const mainPageUrl = new URL(mainPageRes.finalUrl);
|
||||
|
||||
const streamUrl = mainPage.match(linkRegex) ?? [];
|
||||
const thumbnailTrack = tracksRegex.exec(mainPage);
|
||||
|
||||
const playlist = streamUrl[1];
|
||||
if (!playlist) throw new Error('Stream url not found');
|
||||
|
||||
return {
|
||||
stream: [
|
||||
{
|
||||
id: 'primary',
|
||||
type: 'hls',
|
||||
playlist,
|
||||
flags: [flags.IP_LOCKED, flags.CORS_ALLOWED],
|
||||
captions: [],
|
||||
preferredHeaders: {
|
||||
Referer: mainPageUrl.origin,
|
||||
origin: mainPageUrl.origin,
|
||||
},
|
||||
...(thumbnailTrack
|
||||
? {
|
||||
thumbnailTrack: {
|
||||
type: 'vtt',
|
||||
url: new URL(mainPageRes.finalUrl).origin + thumbnailTrack[1],
|
||||
},
|
||||
}
|
||||
: {}),
|
||||
},
|
||||
],
|
||||
};
|
||||
},
|
||||
});
|
51
src/providers/embeds/vtube.ts
Normal file
51
src/providers/embeds/vtube.ts
Normal file
@@ -0,0 +1,51 @@
|
||||
import { load } from 'cheerio';
|
||||
import { unpack } from 'unpacker';
|
||||
|
||||
import { flags } from '@/entrypoint/utils/targets';
|
||||
|
||||
import { makeEmbed } from '../base';
|
||||
|
||||
const evalCodeRegex = /eval\((.*)\)/g;
|
||||
const fileRegex = /file:"(.*?)"/g;
|
||||
const tracksRegex = /\{file:"([^"]+)",kind:"thumbnails"\}/g;
|
||||
|
||||
export const vTubeScraper = makeEmbed({
|
||||
id: 'vtube',
|
||||
name: 'vTube',
|
||||
rank: 145,
|
||||
scrape: async (ctx) => {
|
||||
const mainPageRes = await ctx.fetcher.full<string>(ctx.url, {
|
||||
headers: {
|
||||
referer: ctx.url,
|
||||
},
|
||||
});
|
||||
const mainPage = mainPageRes.body;
|
||||
const html = load(mainPage);
|
||||
const evalCode = html('script').text().match(evalCodeRegex);
|
||||
if (!evalCode) throw new Error('Failed to find eval code');
|
||||
const unpacked = unpack(evalCode?.toString());
|
||||
const file = fileRegex.exec(unpacked);
|
||||
const thumbnailTrack = tracksRegex.exec(unpacked);
|
||||
if (!file?.[1]) throw new Error('Failed to find file');
|
||||
|
||||
return {
|
||||
stream: [
|
||||
{
|
||||
id: 'primary',
|
||||
type: 'hls',
|
||||
playlist: file[1],
|
||||
flags: [flags.CORS_ALLOWED],
|
||||
captions: [],
|
||||
...(thumbnailTrack
|
||||
? {
|
||||
thumbnailTrack: {
|
||||
type: 'vtt',
|
||||
url: new URL(mainPageRes.finalUrl).origin + thumbnailTrack[1],
|
||||
},
|
||||
}
|
||||
: {}),
|
||||
},
|
||||
],
|
||||
};
|
||||
},
|
||||
});
|
@@ -54,6 +54,15 @@ async function getStreams(title: string) {
|
||||
case 'dood.watch':
|
||||
embedId = 'dood';
|
||||
break;
|
||||
case 'dropload.io':
|
||||
embedId = 'dropload';
|
||||
break;
|
||||
case 'filelions.to':
|
||||
embedId = 'filelions';
|
||||
break;
|
||||
case 'vtube.to':
|
||||
embedId = 'vtube';
|
||||
break;
|
||||
default:
|
||||
embedId = null;
|
||||
}
|
||||
|
Reference in New Issue
Block a user