mirror of https://github.com/movie-web/providers.git (synced 2025-09-13 17:53:24 +00:00)

Merge branch 'dev' into vidsrcto

@@ -41,6 +41,7 @@ async function runBrowserScraping(
    args: ['--no-sandbox', '--disable-setuid-sandbox'],
  });
  const page = await browser.newPage();
+ page.on('console', (message) => console.log(`${message.type().slice(0, 3).toUpperCase()} ${message.text()}`));
  await page.goto(server.resolvedUrls.local[0]);
  await page.waitForFunction('!!window.scrape', { timeout: 5000 });

@@ -81,6 +81,7 @@ export async function processOptions(sources: Array<Embed | Sourcerer>, options:
  const providerOptions: ProviderMakerOptions = {
    fetcher,
    target: targets.ANY,
+   consistentIpForRequests: true,
  };

  return {

@@ -2,9 +2,9 @@ import { gatherAllEmbeds, gatherAllSources } from '@/providers/all';
import { Embed, Sourcerer } from '@/providers/base';

export function getBuiltinSources(): Sourcerer[] {
- return gatherAllSources();
+ return gatherAllSources().filter((v) => !v.disabled);
}

export function getBuiltinEmbeds(): Embed[] {
- return gatherAllEmbeds();
+ return gatherAllEmbeds().filter((v) => !v.disabled);
}

@@ -4,7 +4,7 @@ export type FetcherOptions = {
  baseUrl?: string;
  headers?: Record<string, string>;
  query?: Record<string, string>;
- method?: 'GET' | 'POST';
+ method?: 'HEAD' | 'GET' | 'POST';
  readHeaders?: string[];
  body?: Record<string, any> | string | FormData | URLSearchParams;
};

@@ -17,7 +17,7 @@ export type DefaultedFetcherOptions = {
  headers: Record<string, string>;
  query: Record<string, string>;
  readHeaders: string[];
- method: 'GET' | 'POST';
+ method: 'HEAD' | 'GET' | 'POST';
};

export type FetcherResponse<T = any> = {

@@ -28,12 +28,12 @@ export type FetcherResponse<T = any> = {
};

// This is the version that will be inputted by library users
- export type Fetcher<T = any> = {
-   (url: string, ops: DefaultedFetcherOptions): Promise<FetcherResponse<T>>;
+ export type Fetcher = {
+   <T = any>(url: string, ops: DefaultedFetcherOptions): Promise<FetcherResponse<T>>;
};

// This is the version that scrapers will be interacting with
- export type UseableFetcher<T = any> = {
-   (url: string, ops?: FetcherOptions): Promise<T>;
-   full: (url: string, ops?: FetcherOptions) => Promise<FetcherResponse<T>>;
+ export type UseableFetcher = {
+   <T = any>(url: string, ops?: FetcherOptions): Promise<T>;
+   full: <T = any>(url: string, ops?: FetcherOptions) => Promise<FetcherResponse<T>>;
};

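The reworked types above move the generic parameter off Fetcher/UseableFetcher and onto the call signature, so one fetcher value can return a differently typed body on each call; combined with the added 'HEAD' method and readHeaders option, this also allows header-only probes. A minimal usage sketch (illustrative URLs, not taken from this commit):

import type { UseableFetcher } from '@/fetchers/types';

async function probe(fetcher: UseableFetcher) {
  // the generic is chosen at each call site, not when the fetcher was created
  const html = await fetcher<string>('https://example.com/page');
  const data = await fetcher<{ ok: boolean }>('https://example.com/api');

  // header-only request, enabled by the new 'HEAD' method and readHeaders option
  const res = await fetcher.full('https://example.com/video.m3u8', {
    method: 'HEAD',
    readHeaders: ['content-type'],
  });
  return { html, data, contentType: res.headers.get('content-type') };
}
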
@@ -1,6 +1,6 @@
export type { EmbedOutput, SourcererOutput } from '@/providers/base';
export type { Stream, StreamFile, FileBasedStream, HlsBasedStream, Qualities } from '@/providers/streams';
- export type { Fetcher, FetcherOptions, FetcherResponse } from '@/fetchers/types';
+ export type { Fetcher, DefaultedFetcherOptions, FetcherOptions, FetcherResponse } from '@/fetchers/types';
export type { RunOutput } from '@/runners/runner';
export type { MetaOutput } from '@/entrypoint/utils/meta';
export type { FullScraperEvents } from '@/entrypoint/utils/events';

@@ -9,6 +9,8 @@ export type { MediaTypes, ShowMedia, ScrapeMedia, MovieMedia } from '@/entrypoin
export type { ProviderControls, RunnerOptions, EmbedRunnerOptions, SourceRunnerOptions } from '@/entrypoint/controls';
+ export type { ProviderBuilder } from '@/entrypoint/builder';
+ export type { ProviderMakerOptions } from '@/entrypoint/declare';
export type { MovieScrapeContext, ShowScrapeContext, EmbedScrapeContext, ScrapeContext } from '@/utils/context';
export type { SourcererOptions, EmbedOptions } from '@/providers/base';

export { NotFoundError } from '@/utils/errors';
export { makeProviders } from '@/entrypoint/declare';

@@ -3,15 +3,18 @@ import { febboxHlsScraper } from '@/providers/embeds/febbox/hls';
import { febboxMp4Scraper } from '@/providers/embeds/febbox/mp4';
import { mixdropScraper } from '@/providers/embeds/mixdrop';
import { mp4uploadScraper } from '@/providers/embeds/mp4upload';
+ import { streambucketScraper } from '@/providers/embeds/streambucket';
import { streamsbScraper } from '@/providers/embeds/streamsb';
import { upcloudScraper } from '@/providers/embeds/upcloud';
import { upstreamScraper } from '@/providers/embeds/upstream';
+ import { vidsrcembedScraper } from '@/providers/embeds/vidsrc';
import { flixhqScraper } from '@/providers/sources/flixhq/index';
import { goMoviesScraper } from '@/providers/sources/gomovies/index';
import { kissAsianScraper } from '@/providers/sources/kissasian/index';
import { lookmovieScraper } from '@/providers/sources/lookmovie';
import { remotestreamScraper } from '@/providers/sources/remotestream';
import { showboxScraper } from '@/providers/sources/showbox/index';
+ import { vidsrcScraper } from '@/providers/sources/vidsrc/index';
import { zoechipScraper } from '@/providers/sources/zoechip';

import { fileMoonScraper } from './embeds/filemoon';

@@ -30,6 +33,7 @@ export function gatherAllSources(): Array<Sourcerer> {
    showboxScraper,
    goMoviesScraper,
    zoechipScraper,
+   vidsrcScraper,
    lookmovieScraper,
    smashyStreamScraper,
    vidSrcToScraper,

@@ -46,6 +50,8 @@ export function gatherAllEmbeds(): Array<Embed> {
    febboxMp4Scraper,
    febboxHlsScraper,
    mixdropScraper,
+   vidsrcembedScraper,
+   streambucketScraper,
    smashyStreamFScraper,
    smashyStreamDScraper,
    fileMoonScraper,

@@ -1,5 +1,4 @@
import { MediaTypes } from '@/entrypoint/utils/media';
- import { flags } from '@/entrypoint/utils/targets';
import { makeEmbed } from '@/providers/base';
import { parseInputUrl } from '@/providers/embeds/febbox/common';
import { getStreams } from '@/providers/embeds/febbox/fileList';

@@ -16,6 +15,7 @@ export const febboxHlsScraper = makeEmbed({
  id: 'febbox-hls',
  name: 'Febbox (HLS)',
  rank: 160,
+ disabled: true,
  async scrape(ctx) {
    const { type, id, season, episode } = parseInputUrl(ctx.url);
    const sharelinkResult = await ctx.proxiedFetcher<{

@@ -40,7 +40,7 @@ export const febboxHlsScraper = makeEmbed({
      {
        id: 'primary',
        type: 'hls',
-       flags: [flags.CORS_ALLOWED],
+       flags: [],
        captions: await getSubtitles(ctx, id, firstStream.fid, type as MediaTypes, season, episode),
        playlist: `https://www.febbox.com/hls/main/${firstStream.oss_fid}.m3u8`,
      },

@@ -4,24 +4,35 @@ import { ScrapeContext } from '@/utils/context';

const allowedQualities = ['360', '480', '720', '1080', '4k'];

- export async function getStreamQualities(ctx: ScrapeContext, apiQuery: object) {
-   const mediaRes: { list: { path: string; quality: string; fid?: number }[] } = (await sendRequest(ctx, apiQuery)).data;
-
-   const qualityMap = mediaRes.list
-     .filter((file) => allowedQualities.includes(file.quality.replace('p', '')))
-     .map((file) => ({
-       url: file.path,
-       quality: file.quality.replace('p', ''),
-     }));
+ interface FebboxQuality {
+   path: string;
+   real_quality: string;
+   fid?: number;
+ }
+
+ function mapToQuality(quality: FebboxQuality): FebboxQuality | null {
+   const q = quality.real_quality.replace('p', '').toLowerCase();
+   if (!allowedQualities.includes(q)) return null;
+   return {
+     real_quality: q,
+     path: quality.path,
+     fid: quality.fid,
+   };
+ }
+
+ export async function getStreamQualities(ctx: ScrapeContext, apiQuery: object) {
+   const mediaRes: { list: FebboxQuality[] } = (await sendRequest(ctx, apiQuery)).data;
+
+   const qualityMap = mediaRes.list.map((v) => mapToQuality(v)).filter((v): v is FebboxQuality => !!v);

  const qualities: Record<string, StreamFile> = {};

  allowedQualities.forEach((quality) => {
-   const foundQuality = qualityMap.find((q) => q.quality === quality);
-   if (foundQuality && foundQuality.url) {
+   const foundQuality = qualityMap.find((q) => q.real_quality === quality && q.path);
+   if (foundQuality) {
      qualities[quality] = {
        type: 'mp4',
-       url: foundQuality.url,
+       url: foundQuality.path,
      };
    }
  });

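The rewrite above funnels every API entry through mapToQuality, which returns null for unsupported qualities; the .filter((v): v is FebboxQuality => !!v) call is a TypeScript type-guard predicate that drops the nulls and narrows the array type in one step. A generic sketch of the same pattern (not Febbox-specific code):

type Item = { path: string };

function maybeItem(n: number): Item | null {
  return n % 2 === 0 ? { path: `/file-${n}` } : null;
}

const items: Item[] = [1, 2, 3, 4]
  .map(maybeItem) // (Item | null)[]
  .filter((v): v is Item => !!v); // Item[]: nulls removed, element type narrowed
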
@@ -37,22 +37,28 @@ export async function getSubtitles(
  const subResult = (await sendRequest(ctx, subtitleApiQuery)) as CaptionApiResponse;
  const subtitleList = subResult.data.list;
  const output: Caption[] = [];
+ const languagesAdded: Record<string, true> = {};

  subtitleList.forEach((sub) => {
    const subtitle = sub.subtitles.sort((a, b) => b.order - a.order)[0];
    if (!subtitle) return;

    const subtitleFilePath = subtitle.file_path
      .replace(captionsDomains[0], captionsDomains[1])
      .replace(/\s/g, '+')
      .replace(/[()]/g, (c) => {
        return `%${c.charCodeAt(0).toString(16)}`;
      });

    const subtitleType = getCaptionTypeFromUrl(subtitleFilePath);
    if (!subtitleType) return;

+   const validCode = isValidLanguageCode(subtitle.lang);
+   if (!validCode) return;
+
+   if (languagesAdded[subtitle.lang]) return;
+   languagesAdded[subtitle.lang] = true;

    output.push({
      id: subtitleFilePath,
      language: subtitle.lang,

src/providers/embeds/streambucket.ts (new file, 101 lines)
@@ -0,0 +1,101 @@
import { flags } from '@/entrypoint/utils/targets';
import { makeEmbed } from '@/providers/base';

// StreamBucket makes use of https://github.com/nicxlau/hunter-php-javascript-obfuscator

const hunterRegex = /eval\(function\(h,u,n,t,e,r\).*?\("(.*?)",\d*?,"(.*?)",(\d*?),(\d*?),\d*?\)\)/;
const linkRegex = /file:"(.*?)"/;

// This is a much more simple and optimized version of the "h,u,n,t,e,r"
// obfuscation algorithm. It's just basic chunked+mask encoding.
// I have seen this same encoding used on some sites under the name
// "p,l,a,y,e,r" as well
function decodeHunter(encoded: string, mask: string, charCodeOffset: number, delimiterOffset: number) {
  // The encoded string is made up of 'n' number of chunks.
  // Each chunk is separated by a delimiter inside the mask.
  // This offset is also used as the exponentiation base in
  // the charCode calculations
  const delimiter = mask[delimiterOffset];

  // Split the 'encoded' string into chunks using the delimiter,
  // and filter out any empty chunks.
  const chunks = encoded.split(delimiter).filter((chunk) => chunk);

  // Decode each chunk and concatenate the results to form the final 'decoded' string.
  const decoded = chunks
    .map((chunk) => {
      // Chunks are in reverse order. 'reduceRight' removes the
      // need to 'reverse' the array first
      const charCode = chunk.split('').reduceRight((c, value, index) => {
        // Calculate the character code for each character in the chunk.
        // This involves finding the index of 'value' in the 'mask' and
        // multiplying it by (delimiterOffset^position).
        return c + mask.indexOf(value) * delimiterOffset ** (chunk.length - 1 - index);
      }, 0);

      // The actual character code is offset by the given amount
      return String.fromCharCode(charCode - charCodeOffset);
    })
    .join('');

  return decoded;
}

export const streambucketScraper = makeEmbed({
  id: 'streambucket',
  name: 'StreamBucket',
  rank: 196,
  // TODO - Disabled until ctx.fetcher and ctx.proxiedFetcher don't trigger bot detection
  disabled: true,
  async scrape(ctx) {
    // Using the context fetchers make the site return just the string "No bots please!"?
    // TODO - Fix this. Native fetch does not trigger this. No idea why right now
    const response = await fetch(ctx.url);
    const html = await response.text();

    // This is different than the above mentioned bot detection
    if (html.includes('captcha-checkbox')) {
      // TODO - This doesn't use recaptcha, just really basic "image match". Maybe could automate?
      throw new Error('StreamBucket got captchaed');
    }

    let regexResult = html.match(hunterRegex);

    if (!regexResult) {
      throw new Error('Failed to find StreamBucket hunter JavaScript');
    }

    const encoded = regexResult[1];
    const mask = regexResult[2];
    const charCodeOffset = Number(regexResult[3]);
    const delimiterOffset = Number(regexResult[4]);

    if (Number.isNaN(charCodeOffset)) {
      throw new Error('StreamBucket hunter JavaScript charCodeOffset is not a valid number');
    }

    if (Number.isNaN(delimiterOffset)) {
      throw new Error('StreamBucket hunter JavaScript delimiterOffset is not a valid number');
    }

    const decoded = decodeHunter(encoded, mask, charCodeOffset, delimiterOffset);

    regexResult = decoded.match(linkRegex);

    if (!regexResult) {
      throw new Error('Failed to find StreamBucket HLS link');
    }

    return {
      stream: [
        {
          id: 'primary',
          type: 'hls',
          playlist: regexResult[1],
          flags: [flags.CORS_ALLOWED],
          captions: [],
        },
      ],
    };
  },
});

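To make the chunked+mask encoding concrete, here is a hand-built round trip with invented sample values (real StreamBucket payloads are far longer): with mask 'abcdef' and delimiterOffset 5, the delimiter is 'f', each chunk is a base-5 number whose digits are mask characters, and subtracting charCodeOffset yields the character code.

// 'dbc' -> 3*5^2 + 1*5 + 2 = 82, minus charCodeOffset 10 -> 72 -> 'H'
// 'eda' -> 4*5^2 + 3*5 + 0 = 115, minus 10 -> 105 -> 'i'
decodeHunter('dbcfedaf', 'abcdef', 10, 5); // returns 'Hi'
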
src/providers/embeds/vidsrc.ts (new file, 55 lines)
@@ -0,0 +1,55 @@
import { flags } from '@/entrypoint/utils/targets';
import { makeEmbed } from '@/providers/base';

const hlsURLRegex = /file:"(.*?)"/;
const setPassRegex = /var pass_path = "(.*set_pass\.php.*)";/;

export const vidsrcembedScraper = makeEmbed({
  id: 'vidsrcembed', // VidSrc is both a source and an embed host
  name: 'VidSrc',
  rank: 197,
  async scrape(ctx) {
    const html = await ctx.proxiedFetcher<string>(ctx.url, {
      headers: {
        referer: ctx.url,
      },
    });

    const match = html
      .match(hlsURLRegex)?.[1]
      ?.replace(/(\/\/\S+?=)/g, '')
      .replace('#2', '');
    if (!match) throw new Error('Unable to find HLS playlist');
    const finalUrl = atob(match);

    if (!finalUrl.includes('.m3u8')) throw new Error('Unable to find HLS playlist');

    let setPassLink = html.match(setPassRegex)?.[1];
    if (!setPassLink) throw new Error('Unable to find set_pass.php link');

    if (setPassLink.startsWith('//')) {
      setPassLink = `https:${setPassLink}`;
    }

    // VidSrc uses a password endpoint to temporarily whitelist the user's IP. This is called in an interval by the player.
    // It currently has no effect on the player itself, the content plays fine without it.
    // In the future we might have to introduce hooks for the frontend to call this endpoint.
    await ctx.proxiedFetcher(setPassLink, {
      headers: {
        referer: ctx.url,
      },
    });

    return {
      stream: [
        {
          id: 'primary',
          type: 'hls',
          playlist: finalUrl,
          flags: [flags.CORS_ALLOWED],
          captions: [],
        },
      ],
    };
  },
});

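The two replace calls above strip injected //...= noise (and an optional '#2' marker) out of the captured value before base64-decoding it. A hedged illustration with made-up data, not real VidSrc output:

const captured = 'aHR0cHM6Ly9leGFt//AbCd=cGxlLmNvbS9obHMubTN1OA==';
const cleaned = captured.replace(/(\/\/\S+?=)/g, '').replace('#2', '');
// cleaned === 'aHR0cHM6Ly9leGFtcGxlLmNvbS9obHMubTN1OA=='
atob(cleaned); // 'https://example.com/hls.m3u8'
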
@@ -10,8 +10,8 @@ async function universalScraper(ctx: MovieScrapeContext | ShowScrapeContext): Pr
  if (!lookmovieData) throw new NotFoundError('Media not found');

  ctx.progress(30);
- const videoUrl = await scrape(ctx, ctx.media, lookmovieData);
- if (!videoUrl) throw new NotFoundError('No video found');
+ const video = await scrape(ctx, ctx.media, lookmovieData);
+ if (!video.playlist) throw new NotFoundError('No video found');

  ctx.progress(60);

@@ -20,10 +20,10 @@ async function universalScraper(ctx: MovieScrapeContext | ShowScrapeContext): Pr
    stream: [
      {
        id: 'primary',
-       playlist: videoUrl,
+       playlist: video.playlist,
        type: 'hls',
        flags: [flags.IP_LOCKED],
-       captions: [],
+       captions: video.captions,
      },
    ],
  };

@@ -33,6 +33,7 @@ export const lookmovieScraper = makeSourcerer({
  id: 'lookmovie',
  name: 'LookMovie',
  rank: 1,
+ disabled: true,
  flags: [flags.IP_LOCKED],
  scrapeShow: universalScraper,
  scrapeMovie: universalScraper,

@@ -39,8 +39,17 @@ interface VideoSources {
  [key: string]: string;
}

+ interface VideoSubtitles {
+   id?: number;
+   id_movie?: number;
+   url: string;
+   language: string;
+   shard?: string;
+ }
+
export interface StreamsDataResult {
  streams: VideoSources;
+ subtitles: VideoSubtitles[];
}

export interface ResultItem {

@@ -4,7 +4,9 @@ import { ScrapeContext } from '@/utils/context';
import { NotFoundError } from '@/utils/errors';

import { Result, ResultItem, ShowDataResult, episodeObj } from './type';
- import { getVideoUrl } from './video';
+ import { getVideo } from './video';
+
+ export const baseUrl = 'https://lmscript.xyz';

export async function searchAndFindMedia(
  ctx: ScrapeContext,

@@ -12,7 +14,7 @@
): Promise<ResultItem | undefined> {
  if (media.type === 'show') {
    const searchRes = await ctx.fetcher<Result>(`/v1/shows`, {
-     baseUrl: 'https://lmscript.xyz',
+     baseUrl,
      query: { 'filters[q]': media.title },
    });

@@ -23,7 +25,7 @@
  }
  if (media.type === 'movie') {
    const searchRes = await ctx.fetcher<Result>(`/v1/movies`, {
-     baseUrl: 'https://lmscript.xyz',
+     baseUrl,
      query: { 'filters[q]': media.title },
    });

@@ -40,7 +42,7 @@ export async function scrape(ctx: ScrapeContext, media: MovieMedia | ShowMedia,
    id = result.id_movie;
  } else if (media.type === 'show') {
    const data = await ctx.fetcher<ShowDataResult>(`/v1/shows`, {
-     baseUrl: 'https://lmscript.xyz',
+     baseUrl,
      query: { expand: 'episodes', id: result.id_show },
    });

@@ -54,6 +56,6 @@
  // Check ID
  if (id === null) throw new NotFoundError('Not found');

- const videoUrl = await getVideoUrl(ctx, id, media);
- return videoUrl;
+ const video = await getVideo(ctx, id, media);
+ return video;
}

@@ -1,7 +1,9 @@
import { MovieMedia, ShowMedia } from '@/entrypoint/utils/media';
+ import { Caption } from '@/providers/captions';
import { ScrapeContext } from '@/utils/context';

import { StreamsDataResult } from './type';
+ import { baseUrl } from './util';

export async function getVideoSources(
  ctx: ScrapeContext,

@@ -17,17 +19,17 @@
    path = `/v1/movies/view`;
  }
  const data = await ctx.fetcher<StreamsDataResult>(path, {
-   baseUrl: 'https://lmscript.xyz',
-   query: { expand: 'streams', id },
+   baseUrl,
+   query: { expand: 'streams,subtitles', id },
  });
  return data;
}

- export async function getVideoUrl(
+ export async function getVideo(
  ctx: ScrapeContext,
  id: string,
  media: MovieMedia | ShowMedia,
- ): Promise<string | null> {
+ ): Promise<{ playlist: string | null; captions: Caption[] }> {
  // Get sources
  const data = await getVideoSources(ctx, id, media);
  const videoSources = data.streams;

@@ -42,5 +44,16 @@
    }
  }

- return videoUrl;
+ const captions: Caption[] = data.subtitles.map((sub) => ({
+   id: sub.url,
+   type: 'vtt',
+   url: `${baseUrl}${sub.url}`,
+   hasCorsRestrictions: false,
+   language: sub.language,
+ }));
+
+ return {
+   playlist: videoUrl,
+   captions,
+ };
}

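The subtitle URLs appear to come back as relative paths, since the mapping above joins them onto baseUrl when building Caption objects. A small sketch with invented data:

const sub = { url: '/subtitles/42-en.vtt', language: 'en' };
const caption = {
  id: sub.url,
  type: 'vtt' as const,
  url: `https://lmscript.xyz${sub.url}`, // 'https://lmscript.xyz/subtitles/42-en.vtt'
  hasCorsRestrictions: false,
  language: sub.language,
};
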
@@ -2,7 +2,7 @@ import { flags } from '@/entrypoint/utils/targets';
import { makeSourcerer } from '@/providers/base';
import { NotFoundError } from '@/utils/errors';

- const remotestreamBase = `https://fsa.remotestre.am`;
+ const remotestreamBase = atob('aHR0cHM6Ly9mc2IuOG1ldDNkdGpmcmNxY2hjb25xcGtsd3hzeGIyb2N1bWMuc3RyZWFt');

export const remotestreamScraper = makeSourcerer({
  id: 'remotestream',

@@ -16,8 +16,12 @@
    const playlistLink = `${remotestreamBase}/Shows/${ctx.media.tmdbId}/${seasonNumber}/${episodeNumber}/${episodeNumber}.m3u8`;

    ctx.progress(30);
-   const streamRes = await ctx.fetcher<Blob>(playlistLink); // TODO support blobs in fetchers
-   if (streamRes.type !== 'application/x-mpegurl') throw new NotFoundError('No watchable item found');
+   const streamRes = await ctx.fetcher.full(playlistLink, {
+     method: 'HEAD',
+     readHeaders: ['content-type'],
+   });
+   if (!streamRes.headers.get('content-type')?.toLowerCase().includes('application/x-mpegurl'))
+     throw new NotFoundError('No watchable item found');
    ctx.progress(90);

    return {

@@ -37,8 +41,12 @@
    const playlistLink = `${remotestreamBase}/Movies/${ctx.media.tmdbId}/${ctx.media.tmdbId}.m3u8`;

    ctx.progress(30);
-   const streamRes = await ctx.fetcher<Blob>(playlistLink);
-   if (streamRes.type !== 'application/x-mpegurl') throw new NotFoundError('No watchable item found');
+   const streamRes = await ctx.fetcher.full(playlistLink, {
+     method: 'HEAD',
+     readHeaders: ['content-type'],
+   });
+   if (!streamRes.headers.get('content-type')?.toLowerCase().includes('application/x-mpegurl'))
+     throw new NotFoundError('No watchable item found');
    ctx.progress(90);

    return {

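Swapping the Blob fetch for a HEAD probe means only headers cross the wire when validating the playlist, and it removes the need for Blob support in the fetchers. The repeated check could be captured in a hypothetical helper like this (an assumption, not part of the commit):

import type { UseableFetcher } from '@/fetchers/types';

async function isHlsPlaylist(fetcher: UseableFetcher, url: string): Promise<boolean> {
  const res = await fetcher.full(url, { method: 'HEAD', readHeaders: ['content-type'] });
  return res.headers.get('content-type')?.toLowerCase().includes('application/x-mpegurl') ?? false;
}
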
@@ -1,6 +1,5 @@
import { flags } from '@/entrypoint/utils/targets';
import { SourcererOutput, makeSourcerer } from '@/providers/base';
- import { febboxHlsScraper } from '@/providers/embeds/febbox/hls';
import { febboxMp4Scraper } from '@/providers/embeds/febbox/mp4';
import { compareTitle } from '@/utils/compare';
import { MovieScrapeContext, ShowScrapeContext } from '@/utils/context';

@@ -31,10 +30,6 @@ async function comboScraper(ctx: ShowScrapeContext | MovieScrapeContext): Promis

  return {
    embeds: [
-     {
-       embedId: febboxHlsScraper.id,
-       url: `/${ctx.media.type}/${id}/${season}/${episode}`,
-     },
      {
        embedId: febboxMp4Scraper.id,
        url: `/${ctx.media.type}/${id}/${season}/${episode}`,

src/providers/sources/vidsrc/common.ts (new file, 2 lines)
@@ -0,0 +1,2 @@
export const vidsrcBase = 'https://vidsrc.me';
export const vidsrcRCPBase = 'https://rcp.vidsrc.me';

src/providers/sources/vidsrc/index.ts (new file, 13 lines)
@@ -0,0 +1,13 @@
import { flags } from '@/entrypoint/utils/targets';
import { makeSourcerer } from '@/providers/base';
import { scrapeMovie } from '@/providers/sources/vidsrc/scrape-movie';
import { scrapeShow } from '@/providers/sources/vidsrc/scrape-show';

export const vidsrcScraper = makeSourcerer({
  id: 'vidsrc',
  name: 'VidSrc',
  rank: 120,
  flags: [flags.CORS_ALLOWED],
  scrapeMovie,
  scrapeShow,
});

src/providers/sources/vidsrc/scrape-movie.ts (new file, 8 lines)
@@ -0,0 +1,8 @@
import { getVidSrcMovieSources } from '@/providers/sources/vidsrc/scrape';
import { MovieScrapeContext } from '@/utils/context';

export async function scrapeMovie(ctx: MovieScrapeContext) {
  return {
    embeds: await getVidSrcMovieSources(ctx),
  };
}

src/providers/sources/vidsrc/scrape-show.ts (new file, 8 lines)
@@ -0,0 +1,8 @@
import { getVidSrcShowSources } from '@/providers/sources/vidsrc/scrape';
import { ShowScrapeContext } from '@/utils/context';

export async function scrapeShow(ctx: ShowScrapeContext) {
  return {
    embeds: await getVidSrcShowSources(ctx),
  };
}

src/providers/sources/vidsrc/scrape.ts (new file, 133 lines)
@@ -0,0 +1,133 @@
import { load } from 'cheerio';

import { SourcererEmbed } from '@/providers/base';
import { streambucketScraper } from '@/providers/embeds/streambucket';
import { vidsrcembedScraper } from '@/providers/embeds/vidsrc';
import { vidsrcBase, vidsrcRCPBase } from '@/providers/sources/vidsrc/common';
import { MovieScrapeContext, ShowScrapeContext } from '@/utils/context';

function decodeSrc(encoded: string, seed: string) {
  let decoded = '';
  const seedLength = seed.length;

  for (let i = 0; i < encoded.length; i += 2) {
    const byte = parseInt(encoded.substr(i, 2), 16);
    const seedChar = seed.charCodeAt((i / 2) % seedLength);
    decoded += String.fromCharCode(byte ^ seedChar);
  }

  return decoded;
}

async function getVidSrcEmbeds(ctx: MovieScrapeContext | ShowScrapeContext, startingURL: string) {
  // VidSrc works by using hashes and a redirect system.
  // The hashes are stored in the html, and VidSrc will
  // make requests to their servers with the hash. This
  // will trigger a 302 response with a Location header
  // sending the user to the correct embed. To get the
  // real embed links, we must do the same. Slow, but
  // required

  const embeds: SourcererEmbed[] = [];

  let html = await ctx.proxiedFetcher<string>(startingURL, {
    baseUrl: vidsrcBase,
  });

  let $ = load(html);

  const sourceHashes = $('.server[data-hash]')
    .toArray()
    .map((el) => $(el).attr('data-hash'))
    .filter((hash) => hash !== undefined);

  for (const hash of sourceHashes) {
    html = await ctx.proxiedFetcher<string>(`/rcp/${hash}`, {
      baseUrl: vidsrcRCPBase,
      headers: {
        referer: vidsrcBase,
      },
    });

    $ = load(html);
    const encoded = $('#hidden').attr('data-h');
    const seed = $('body').attr('data-i');

    if (!encoded || !seed) {
      throw new Error('Failed to find encoded iframe src');
    }

    let redirectURL = decodeSrc(encoded, seed);
    if (redirectURL.startsWith('//')) {
      redirectURL = `https:${redirectURL}`;
    }

    const { finalUrl } = await ctx.proxiedFetcher.full(redirectURL, {
      method: 'HEAD',
      headers: {
        referer: vidsrcBase,
      },
    });

    const embed: SourcererEmbed = {
      embedId: '',
      url: finalUrl,
    };

    const parsedUrl = new URL(finalUrl);

    switch (parsedUrl.host) {
      case 'vidsrc.stream':
        embed.embedId = vidsrcembedScraper.id;
        break;
      case 'streambucket.net':
        embed.embedId = streambucketScraper.id;
        break;
      case '2embed.cc':
      case 'www.2embed.cc':
        // Just ignore this. This embed just sources from other embeds we can scrape as a 'source'
        break;
      case 'player-cdn.com':
        // Just ignore this. This embed streams video over a custom WebSocket connection
        break;
      default:
        throw new Error(`Failed to find VidSrc embed source for ${finalUrl}`);
    }

    // Since some embeds are ignored on purpose, check if a valid one was found
    if (embed.embedId !== '') {
      embeds.push(embed);
    }
  }

  return embeds;
}

export async function getVidSrcMovieSources(ctx: MovieScrapeContext) {
  return getVidSrcEmbeds(ctx, `/embed/${ctx.media.tmdbId}`);
}

export async function getVidSrcShowSources(ctx: ShowScrapeContext) {
  // VidSrc will always default to season 1 episode 1
  // no matter what embed URL is used. It sends back
  // a list of ALL the shows episodes, in order, for
  // all seasons. To get the real embed URL, have to
  // parse this from the response
  const html = await ctx.proxiedFetcher<string>(`/embed/${ctx.media.tmdbId}`, {
    baseUrl: vidsrcBase,
  });

  const $ = load(html);

  const episodeElement = $(`.ep[data-s="${ctx.media.season.number}"][data-e="${ctx.media.episode.number}"]`).first();
  if (episodeElement.length === 0) {
    throw new Error('failed to find episode element');
  }

  const startingURL = episodeElement.attr('data-iframe');
  if (!startingURL) {
    throw new Error('failed to find episode starting URL');
  }

  return getVidSrcEmbeds(ctx, startingURL);
}

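decodeSrc above reverses a simple XOR-with-seed hex encoding: every pair of hex digits is one byte, XORed against the seed character at the same position (mod seed length). A round-trip sketch; encodeSrc is invented here purely for illustration, VidSrc only ever ships the encoded form:

// hypothetical inverse of decodeSrc, for demonstration only
function encodeSrc(plain: string, seed: string): string {
  return [...plain]
    .map((ch, i) => (ch.charCodeAt(0) ^ seed.charCodeAt(i % seed.length)).toString(16).padStart(2, '0'))
    .join('');
}

const seed = 'abc';
const encoded = encodeSrc('//example.embed/path', seed);
decodeSrc(encoded, seed); // '//example.embed/path'
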
@@ -38,7 +38,7 @@ export async function formatSource(
      embed.embedId = mixdropScraper.id;
      break;
    default:
-     throw new Error(`Failed to find ZoeChip embed source for ${link}`);
+     return null;
  }

  return embed;

|
@@ -4,7 +4,6 @@ import { Caption } from '@/providers/captions';
|
||||
export type StreamFile = {
|
||||
type: 'mp4';
|
||||
url: string;
|
||||
headers?: Record<string, string>;
|
||||
};
|
||||
|
||||
export type Qualities = 'unknown' | '360' | '480' | '720' | '1080' | '4k';
|
||||
|
@@ -116,9 +116,12 @@ export async function runAllProviders(list: ProviderList, ops: ProviderRunnerOpt
|
||||
};
|
||||
}
|
||||
|
||||
if (output.embeds.length > 0) {
|
||||
// run embed scrapers on listed embeds
|
||||
const sortedEmbeds = output.embeds.sort((a, b) => embedIds.indexOf(a.embedId) - embedIds.indexOf(b.embedId));
|
||||
|
||||
if (sortedEmbeds.length > 0) {
|
||||
ops.events?.discoverEmbeds?.({
|
||||
embeds: output.embeds.map((v, i) => ({
|
||||
embeds: sortedEmbeds.map((v, i) => ({
|
||||
id: [s.id, i].join('-'),
|
||||
embedScraperId: v.embedId,
|
||||
})),
|
||||
@@ -126,10 +129,6 @@ export async function runAllProviders(list: ProviderList, ops: ProviderRunnerOpt
|
||||
});
|
||||
}
|
||||
|
||||
// run embed scrapers on listed embeds
|
||||
const sortedEmbeds = output.embeds;
|
||||
sortedEmbeds.sort((a, b) => embedIds.indexOf(a.embedId) - embedIds.indexOf(b.embedId));
|
||||
|
||||
for (const ind in sortedEmbeds) {
|
||||
if (!Object.prototype.hasOwnProperty.call(sortedEmbeds, ind)) continue;
|
||||
const e = sortedEmbeds[ind];
|
||||
|
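The rework above sorts output.embeds by each embed's position in embedIds (the embed rank order) before the discoverEmbeds event fires, so the event and the scraping loop now see the same ordering. A tiny sketch with made-up ids:

const embedIds = ['embed-a', 'embed-b', 'embed-c']; // rank order
const embeds = [
  { embedId: 'embed-c', url: 'https://example.com/1' },
  { embedId: 'embed-a', url: 'https://example.com/2' },
];
const sortedEmbeds = embeds.sort((a, b) => embedIds.indexOf(a.embedId) - embedIds.indexOf(b.embedId));
// sortedEmbeds[0].embedId === 'embed-a'
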
@@ -2,8 +2,8 @@ import { MovieMedia, ShowMedia } from '@/entrypoint/utils/media';
import { UseableFetcher } from '@/fetchers/types';

export type ScrapeContext = {
- proxiedFetcher: <T>(...params: Parameters<UseableFetcher<T>>) => ReturnType<UseableFetcher<T>>;
- fetcher: <T>(...params: Parameters<UseableFetcher<T>>) => ReturnType<UseableFetcher<T>>;
+ proxiedFetcher: UseableFetcher;
+ fetcher: UseableFetcher;
  progress(val: number): void;
};