Mirror of https://github.com/movie-web/providers.git (synced 2025-09-13 11:33:25 +00:00)
@@ -24,7 +24,7 @@ import { zoechipScraper } from '@/providers/sources/zoechip';
 import { closeLoadScraper } from './embeds/closeload';
 import { fileMoonScraper } from './embeds/filemoon';
 import { ridooScraper } from './embeds/ridoo';
-import { smashyStreamDScraper } from './embeds/smashystream/dued';
+import { smashyStreamOScraper } from './embeds/smashystream/opstream';
 import { smashyStreamFScraper } from './embeds/smashystream/video1';
 import { streamtapeScraper } from './embeds/streamtape';
 import { streamvidScraper } from './embeds/streamvid';
@@ -75,7 +75,7 @@ export function gatherAllEmbeds(): Array<Embed> {
     vidsrcembedScraper,
     streambucketScraper,
     smashyStreamFScraper,
-    smashyStreamDScraper,
+    smashyStreamOScraper,
     ridooScraper,
     closeLoadScraper,
     fileMoonScraper,
src/providers/embeds/smashystream/dued.ts (deleted)
@@ -1,71 +0,0 @@
-import { load } from 'cheerio';
-
-import { flags } from '@/entrypoint/utils/targets';
-import { makeEmbed } from '@/providers/base';
-
-type DPlayerSourcesResponse = {
-  title: string;
-  id: string;
-  file: string;
-}[];
-
-export const smashyStreamDScraper = makeEmbed({
-  id: 'smashystream-d',
-  name: 'SmashyStream (D)',
-  rank: 71,
-  async scrape(ctx) {
-    const mainPageRes = await ctx.proxiedFetcher<string>(ctx.url, {
-      headers: {
-        Referer: ctx.url,
-      },
-    });
-    const mainPageRes$ = load(mainPageRes);
-    const iframeUrl = mainPageRes$('iframe').attr('src');
-    if (!iframeUrl) throw new Error(`[${this.name}] failed to find iframe url`);
-    const mainUrl = new URL(iframeUrl);
-    const iframeRes = await ctx.proxiedFetcher<string>(iframeUrl, {
-      headers: {
-        Referer: ctx.url,
-      },
-    });
-    const textFilePath = iframeRes.match(/"file":"([^"]+)"/)?.[1];
-    const csrfToken = iframeRes.match(/"key":"([^"]+)"/)?.[1];
-    if (!textFilePath || !csrfToken) throw new Error(`[${this.name}] failed to find text file url or token`);
-    const textFileUrl = `${mainUrl.origin}${textFilePath}`;
-    const textFileRes = await ctx.proxiedFetcher<DPlayerSourcesResponse>(textFileUrl, {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/x-www-form-urlencoded',
-        'X-CSRF-TOKEN': csrfToken,
-        Referer: iframeUrl,
-      },
-    });
-    // Playlists in Hindi, English, Tamil and Telugu are available. We only get the english one.
-    const textFilePlaylist = textFileRes.find((x) => x.title === 'English')?.file;
-    if (!textFilePlaylist) throw new Error(`[${this.name}] failed to find an english playlist`);
-
-    const playlistRes = await ctx.proxiedFetcher<string>(
-      `${mainUrl.origin}/playlist/${textFilePlaylist.slice(1)}.txt`,
-      {
-        method: 'POST',
-        headers: {
-          'Content-Type': 'application/x-www-form-urlencoded',
-          'X-CSRF-TOKEN': csrfToken,
-          Referer: iframeUrl,
-        },
-      },
-    );
-
-    return {
-      stream: [
-        {
-          id: 'primary',
-          playlist: playlistRes,
-          type: 'hls',
-          flags: [flags.CORS_ALLOWED],
-          captions: [],
-        },
-      ],
-    };
-  },
-});
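A note on the deleted D scraper above: it resolved its playlist in two hops. It first pulled an iframe URL out of the embed page, then lifted a "file" path and a CSRF "key" out of the iframe's inline player config, and only then POSTed for the source list and the English playlist. A minimal sketch of the extraction step, against a made-up iframe body (the real markup is not shown in this diff):

    // Hypothetical iframe response; only the "file" and "key" fields matter for the regexes.
    const iframeRes = '<script>var player = {"file":"/dl/sources/123.txt","key":"abc123"};</script>';
    const textFilePath = iframeRes.match(/"file":"([^"]+)"/)?.[1]; // "/dl/sources/123.txt"
    const csrfToken = iframeRes.match(/"key":"([^"]+)"/)?.[1]; // "abc123"

Both values then fed the POST to `${mainUrl.origin}${textFilePath}` with the X-CSRF-TOKEN header, as the removed code shows.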
src/providers/embeds/smashystream/opstream.ts (new file, 17 lines)
@@ -0,0 +1,17 @@
+import { makeEmbed } from '@/providers/base';
+
+import { smashyStreamFScraper } from './video1';
+
+export const smashyStreamOScraper = makeEmbed({
+  // the scraping logic for all smashystream embeds is the same
+  // all the embeds can be added in the same way
+  id: 'smashystream-o',
+  name: 'SmashyStream (O)',
+  rank: 70,
+  async scrape(ctx) {
+    const result = await smashyStreamFScraper.scrape(ctx);
+    return {
+      stream: result.stream,
+    };
+  },
+});
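The two comments in this new file are the whole story: an embed can delegate to another embed's scrape() and re-expose the result under its own id and rank. Any further SmashyStream variant could be wired up the same way; a sketch with an invented id, name and rank (not part of this change):

    import { makeEmbed } from '@/providers/base';

    import { smashyStreamFScraper } from './video1';

    // Hypothetical extra variant that reuses the F scraper's logic unchanged.
    export const smashyStreamXScraper = makeEmbed({
      id: 'smashystream-x',
      name: 'SmashyStream (X)',
      rank: 69,
      async scrape(ctx) {
        const result = await smashyStreamFScraper.scrape(ctx);
        return { stream: result.stream };
      },
    });

Such an embed still has to be imported and listed in gatherAllEmbeds(), which is exactly what the first two hunks of this diff do for the O variant.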
src/providers/embeds/smashystream/video1.ts
@@ -1,16 +1,29 @@
 import { flags } from '@/entrypoint/utils/targets';
 import { makeEmbed } from '@/providers/base';
 import { Caption, getCaptionTypeFromUrl, labelToLanguageCode } from '@/providers/captions';
 import { NotFoundError } from '@/utils/errors';

 type FPlayerResponse = {
   sourceUrls: string[];
-  subtitleUrls: string;
+  subtitles: string | null;
 };

+// if you don't understand how this is reversed
+// check https://discord.com/channels/871713465100816424/1186646348137775164/1225644477188935770
+// feel free to reach out atpn or ciaran_ds on discord if you've any problems
+function decode(str: string): string {
+  const b = ['U0ZML2RVN0IvRGx4', 'MGNhL0JWb0kvTlM5', 'Ym94LzJTSS9aU0Zj', 'SGJ0L1dGakIvN0dX', 'eE52L1QwOC96N0Yz'];
+  let formatedB64 = str.slice(2);
+  for (let i = 4; i > -1; i--) {
+    formatedB64 = formatedB64.replace(`//${b[i]}`, '');
+  }
+  return atob(formatedB64);
+}
+
 export const smashyStreamFScraper = makeEmbed({
   id: 'smashystream-f',
   name: 'SmashyStream (F)',
-  rank: 70,
+  rank: 71,
   async scrape(ctx) {
     const res = await ctx.proxiedFetcher<FPlayerResponse>(ctx.url, {
       headers: {
@@ -18,15 +31,20 @@ export const smashyStreamFScraper = makeEmbed({
       },
     });

     if (!res.sourceUrls[0]) throw new NotFoundError('No watchable item found');

+    const playlist = decode(res.sourceUrls[0]);
+    if (!playlist.includes('.m3u8')) throw new Error('Failed to decode');
+
     const captions: Caption[] =
-      res.subtitleUrls
-        .match(/\[([^\]]+)\](https?:\/\/\S+?)(?=,\[|$)/g)
+      res.subtitles
+        ?.match(/\[([^\]]+)\](https?:\/\/\S+?)(?=,\[|$)/g)
         ?.map<Caption | null>((entry: string) => {
           const match = entry.match(/\[([^\]]+)\](https?:\/\/\S+?)(?=,\[|$)/);
           if (match) {
             const [, language, url] = match;
             if (language && url) {
-              const languageCode = labelToLanguageCode(language);
+              const languageCode = labelToLanguageCode(language.replace(/ - .*/, ''));
               const captionType = getCaptionTypeFromUrl(url);
               if (!languageCode || !captionType) return null;
               return {
@@ -46,7 +64,7 @@ export const smashyStreamFScraper = makeEmbed({
       stream: [
         {
           id: 'primary',
-          playlist: res.sourceUrls[0],
+          playlist,
           type: 'hls',
           flags: [flags.CORS_ALLOWED],
           captions,
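The decode() helper added above is the core of this change: sourceUrls no longer carries a plain URL but an obfuscated string, and the linked Discord thread is the only upstream explanation of the scheme. Read from the shipped code rather than any spec: the five strings in b appear to be junk fragments spliced into a base64 payload as "//<fragment>" markers; decode() drops the first two characters, strips those five fragments, and base64-decodes what remains, which the caller then sanity-checks for ".m3u8". An equivalent spelled-out version:

    // Same behaviour as the decode() in the diff, with the steps commented.
    function decodeSmashy(str: string): string {
      const junk = ['U0ZML2RVN0IvRGx4', 'MGNhL0JWb0kvTlM5', 'Ym94LzJTSS9aU0Zj', 'SGJ0L1dGakIvN0dX', 'eE52L1QwOC96N0Yz'];
      let b64 = str.slice(2); // drop the two leading characters, as the original decode() does
      for (let i = junk.length - 1; i >= 0; i--) {
        b64 = b64.replace(`//${junk[i]}`, ''); // strip each injected "//<fragment>" once, last to first
      }
      return atob(b64); // atob is global in browsers and recent Node; the result should contain .m3u8
    }

The caption change in the same file is smaller: subtitle labels are now cut at the first " - " before being mapped, so a label like "English - SDH" (a hypothetical example) resolves to the same language code as plain "English".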
@@ -1,56 +1,29 @@
-import { load } from 'cheerio';
-
 import { flags } from '@/entrypoint/utils/targets';
-import { SourcererEmbed, SourcererOutput, makeSourcerer } from '@/providers/base';
-import { smashyStreamDScraper } from '@/providers/embeds/smashystream/dued';
+import { SourcererOutput, makeSourcerer } from '@/providers/base';
+import { smashyStreamOScraper } from '@/providers/embeds/smashystream/opstream';
 import { smashyStreamFScraper } from '@/providers/embeds/smashystream/video1';
 import { MovieScrapeContext, ShowScrapeContext } from '@/utils/context';

-const smashyStreamBase = 'https://embed.smashystream.com';
-const referer = 'https://smashystream.com/';
-
 const universalScraper = async (ctx: ShowScrapeContext | MovieScrapeContext): Promise<SourcererOutput> => {
-  const mainPage = await ctx.proxiedFetcher<string>('/playere.php', {
-    query: {
-      tmdb: ctx.media.tmdbId,
-      ...(ctx.media.type === 'show' && {
-        season: ctx.media.season.number.toString(),
-        episode: ctx.media.episode.number.toString(),
-      }),
-    },
-    headers: {
-      Referer: referer,
-    },
-    baseUrl: smashyStreamBase,
-  });
-
-  ctx.progress(30);
-
-  const mainPage$ = load(mainPage);
-  const sourceUrls = mainPage$('.dropdown-menu a[data-url]')
-    .map((_, el) => mainPage$(el).attr('data-url'))
-    .get();
-
-  const embeds: SourcererEmbed[] = [];
-  for (const sourceUrl of sourceUrls) {
-    if (sourceUrl.includes('video1d.php')) {
-      embeds.push({
-        embedId: smashyStreamFScraper.id,
-        url: sourceUrl,
-      });
-    }
-    if (sourceUrl.includes('dued.php')) {
-      embeds.push({
-        embedId: smashyStreamDScraper.id,
-        url: sourceUrl,
-      });
-    }
-  }
-
-  ctx.progress(60);
+  // theres no point in fetching the player page
+  // because it too just calls the api with the tmdb id
+  // thats the only way to find out if the embed has any streams
+  const query =
+    ctx.media.type === 'movie'
+      ? `?tmdb=${ctx.media.tmdbId}`
+      : `?tmdbId=${ctx.media.tmdbId}&season=${ctx.media.season.number}&episode=${ctx.media.episode.number}`;

   return {
-    embeds,
+    embeds: [
+      {
+        embedId: smashyStreamFScraper.id,
+        url: `https://embed.smashystream.com/video1dn.php${query}`,
+      },
+      {
+        embedId: smashyStreamOScraper.id,
+        url: `https://embed.smashystream.com/videoop.php${query}`,
+      },
+    ],
   };
 };

@@ -59,7 +32,6 @@ export const smashyStreamScraper = makeSourcerer({
   name: 'SmashyStream',
   rank: 30,
   flags: [flags.CORS_ALLOWED],
-  disabled: true,
   scrapeMovie: universalScraper,
   scrapeShow: universalScraper,
 });
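With the rewrite above, the source no longer scrapes the player page for data-url entries; per the added comment, that page would only call the same API, so the scraper builds the two embed URLs straight from the TMDB id. A worked example of the query it produces (the ids are placeholders; note the parameter name differs between the movie and show branches, tmdb vs tmdbId, exactly as in the code):

    // Mirrors the ternary in universalScraper with a placeholder media object.
    const media = { type: 'show' as 'movie' | 'show', tmdbId: '1396', season: { number: 1 }, episode: { number: 2 } };
    const query =
      media.type === 'movie'
        ? `?tmdb=${media.tmdbId}`
        : `?tmdbId=${media.tmdbId}&season=${media.season.number}&episode=${media.episode.number}`;
    // query === '?tmdbId=1396&season=1&episode=2'
    // -> https://embed.smashystream.com/videoop.php?tmdbId=1396&season=1&episode=2

The second hunk also removes disabled: true from the sourcerer definition, which is what had been keeping the SmashyStream source from running.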