Commit 46cf839b authored by Devon Kearns

Imported Upstream version 2.21

parent 76da1a04
@@ -110,6 +110,7 @@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
SET_MAKE = @SET_MAKE@
......
@@ -5,7 +5,6 @@
darkraver@open-labs.org (http://dirb.sf.net)
What is DIRB?
------------
@@ -27,11 +26,10 @@ nor does it look for web contents that can be vulnerable.
Maybe the last try for an unlucky security analyst... :)
What is NOT?
------------
DIRB is NOT a Web Spider. It doesn’t follow HTML links (by now). It searches
DIRB is NOT a Web Spider. It doesn't follow HTML links (by now). It searches
content by rules and dictionary based attacks.
DIRB is NOT a Web Downloader. It doesn't download Web Pages (by now), only
@@ -41,8 +39,7 @@ DIRB is NOT a Web Vulnerability Scanner. It does not look for bugs. But it's
designed for helping in web vulnerability assessment.
INSTALLATION
Installation
------------
DIRB is based on libcurl so you need to install this library where autoconf
@@ -52,8 +49,7 @@ can locate it. Once libcurl is installed properly you only need to run:
$ make
USAGE
Usage
-----
DIRB takes 2 main parameters, the base URL for testing and a list of wordlist
@@ -68,12 +64,12 @@ with one word per line. It is also possible to scan subdirectories directly:
$ ./dirb.exe http://www.test.org/html/ common.txt
For SSL simple include the HTTPS url:
For SSL simply include the HTTPS URL:
$ ./dirb.exe https://www.test.org/ common.txt -i
You can use multiple wordfiles at a time this way (separated by coma):
You can use multiple wordfiles at a time this way (separated by comma):
$ ./dirb.exe https://www.test.org/ common.txt,spanish.txt,names.txt
@@ -84,10 +80,9 @@ the -X option:
$ ./dirb.exe https://www.test.org/ common.txt -X .html,.asp,.jsp,,
$ ./dirb.exe https://www.test.org/ common.txt -x extensions.txt
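For illustration, a hypothetical extensions.txt for the -x option simply
lists one extension per line (the exact file shipped with DIRB is not shown
in this diff):

.html
.asp
.jsp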
EXAMPLES
Examples
--------
+ Scan a webserver for common directories/files: (without using file
@@ -108,7 +103,7 @@ files)
BUGS
Bugs
----
There are a lot :)
@@ -116,13 +111,12 @@ There are a lot :)
Please report them to: darkraver@open-labs.org
CREDITS
Credits
-------
Project manager: The Dark Raver
Contributors: Sage, Jfs, Warezzman, The Dark Raver
Contributors: Sage, Jfs, Warezzman, The Dark Raver, Sha0, Hubert Seiwert, Pablo Catalina
Beta-testers, Ideas: Necronoid, Fatuo, IaM, Laramies, Mandingo
......
@@ -18,6 +18,9 @@
/* Define to the one symbol short name of this package. */
#undef PACKAGE_TARNAME
/* Define to the home page for this package. */
#undef PACKAGE_URL
/* Define to the version of this package. */
#undef PACKAGE_VERSION
......
This source diff could not be displayed because it is too large.
@@ -2,7 +2,7 @@ dnl ++ Starting
dnl +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AC_PREREQ(2.50)
AC_INIT(dirb,2.03,darkraver@openlabs.org)
AC_INIT(dirb,2.21,darkraver@open-labs.org)
AC_CONFIG_SRCDIR(src/dirb.c)
AM_INIT_AUTOMAKE(AC_PACKAGE_NAME, AC_PACKAGE_VERSION)
AM_CONFIG_HEADER(config.h)
@@ -10,7 +10,6 @@ AM_CONFIG_HEADER(config.h)
AC_CONFIG_FILES(Makefile src/Makefile gendict_src/Makefile web2dic/Makefile)
dnl ++ Checks for curl-config
dnl +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -47,14 +46,12 @@ AC_SUBST(NETWORK_CFLAGS)
AC_SUBST(NETWORK_LIBS)
dnl ++ Checks for curl_easy_init in libcurl
dnl +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AC_CHECK_LIB(curl, curl_easy_init,, AC_MSG_ERROR(Can't find function curl_easy_init in -lcurl. LibCurl is required.))
dnl ++ Final message
dnl +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
......
DIRB CHANGELOG
==============
2.2.1 (24/04/2013)
------
- FEATURE: Added hotkey 'r' that shows the remaining tests
- FEATURE: Added count_words() function
- FEATURE: Added support for multiple headers, patch by Pablo Catalina
- FEATURE: Changed the default wordlists path to /usr/share/dirb/wordlists, patch by Pablo Catalina
- FEATURE: The resume folder now goes in the home directory and is created if it does not exist, patch by Pablo Catalina
2.2.0 (23/04/2013)
-----
- FEATURE: added compare_str() function, which returns the position of the first differing character
- BUG: fixed the use of fine-tuning for 30x-type pages in lanza_ataque()
- FEATURE: improved location_clean() function
- CLEAN: wordlist cleanup
- BUG: fixed location_clean(); part of the value was being overwritten with itself
- FEATURE: added hpsmh wordlist (HP System Management Homepage)
- CLEAN: updated the routine for the -v option in lanza_ataque()
- BUG: fixed incorrect use of url_base in lanza_ataque()
- FEATURE: on a recoverable error the scan now moves on to the next directory instead of exiting
- CLEAN: removed the use of resuming in lanza_ataque()
- CLEAN: unified the use of get_necs()
- CLEAN: modified calcula_nec() and resume() to allow recovery when the NEC calculation fails
- CLEAN: fixed some typos in the English messages
- BUG: fixed erroneous use of get_necs() in resume.c
- FEATURE: added axis and ror (Ruby on Rails) wordlists; expanded oas and tomcat
2.1.0 (25/05/2011)
-----
- CLEAN: fixed typos in the documentation
- CLEAN: audited all strncat() calls
- CLEAN: optimized barra() function
- FEATURE: tested-URLs output adjusted to the terminal width
- BUG: changed the printf() calls that displayed and erased the URL in lanza_ataque() to avoid spurious line breaks
- FEATURE: changed output format (everything on one line)
- FEATURE: added wordlists: wps (WebSphere Portal Server), sap (SAP J2EE), domino (updated)
2.0.4 (06/04/2009)
-----
- BUG: -R option fails, fix by Hubert Seiwert
- FEATURE: the trailing / is no longer forced if the URL has parameters
- BUG: detection of recurring 401, 403 and 500 codes was failing
2.0.3 (27/01/2009)
-----
@@ -354,4 +404,5 @@ when computing the length of the received HTML page.
- Reorganized the code to make it less compact
- Expanded and cleaned up most of the wordlists
- Added TIMEOUT for URL requests
- Added changes.txt file :)
\ No newline at end of file
- Added changes.txt file :)
DIRB FAQ
========
Questions:
1) Can I use multiple wordfiles at a time?
2) I know 3 directories exist, how can I scan them?
@@ -19,19 +18,17 @@ webserver and they didn't find anything. Can DIRB give me some hope?
7) Can DIRB be used like a classical CGI scanner?
8) What is the NEC?
8) What is the NOT_FOUND code?
9) What about a multi-thread version?
####################################################
1) Can I use multiple wordfiles at a time?
YES, the wordfile parameter let you specify multiple files separated by
YES, the wordfile parameter lets you specify multiple files separated by
comma: ","
Example:
@@ -39,8 +36,7 @@ Example:
$./dirb http://www.site.com/ wordlist1.txt,wordlist2.txt,wordlist3.txt
2) I Know 3 directories exist, how can I scan them?
2) I know 3 directories exist, how can I scan them?
You have 2 options:
@@ -66,7 +62,6 @@ directory3
----------------
3) I'm a newbie in security testing. Is DIRB an easy tool?
NO. DIRB is a tool for automating the search of (normally hidden) web
@@ -74,17 +69,15 @@ applications. But once you have found them, you need good knowledge of
security and penetration testing to take advantage of this information.
4) Can DIRB find or exploit vulnerabilities?
NO. DIRB looks for web objects. To determine whether they are vulnerable or not,
you must use your own intelligence or another kind of tool.
5) Can you include a description of each item found?
NO. DIRB scan generic contents. I don't know what exists in each one.
NO. DIRB scans generic contents. I don't know what exists behind each one.
Maybe some administrator uses a file named "test.asp" as his main
administration menu, or it is simply a "hello world" script...
@@ -97,7 +90,6 @@ YES. DIRB can find something that classic CGI scanners can't. Maybe
it's your last chance.
7) Can DIRB be used like a classical CGI scanner?
YES. You only need to define a wordlist with common vulnerable CGI names, and
@@ -105,8 +97,7 @@ feed this wordfile into DIRB. (The default dirb distribution comes
with a wordfile for this usage: "wordlists/vulns/cgis.txt")
8) What is the NOT_FOUND?
8) What is the NOT_FOUND code?
NOT_FOUND is the response code that a webserver returns for nonexistent pages or
documents. DIRB uses this code to locate only the pages that really exist, and
@@ -115,12 +106,11 @@ but in some cases the NOT_FOUND code is not 404 and most CGI scanners will fail
in detecting existing pages.
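As an illustration only (a minimal sketch built on libcurl, which DIRB
already depends on; the probe URL is made up), the NOT_FOUND code can be
learned by requesting a path that almost certainly does not exist and
recording the status code the server returns:

#include <stdio.h>
#include <curl/curl.h>

/* discard the body; only the status code matters here */
static size_t discard(void *p, size_t sz, size_t n, void *ud) {
    (void)p; (void)ud;
    return sz * n;
}

int main(void) {
    CURL *curl = curl_easy_init();
    long code = 0;
    if (!curl) return 1;
    /* hypothetical random-looking path, very unlikely to exist */
    curl_easy_setopt(curl, CURLOPT_URL, "http://www.test.org/QmzxEJPKzKtQ");
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, discard);
    if (curl_easy_perform(curl) == CURLE_OK) {
        curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
        printf("NOT_FOUND code for this server: %ld\n", code);
    }
    curl_easy_cleanup(curl);
    return 0;
}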
9) What about a multi-thread version?
I have tested a simplified demo version of dirb running multiple threads and
the speed ganancy was about 20-40%. This ganancy is not significant and the
complexity of the code does (by now) not practical to run dirb with threads.
the speed gain was about 20-40%. This improvement is not significant and the
complexity of the code makes it (for now) not worthwhile.
@@ -12,7 +12,6 @@ number of times. This wildcard will be replaced with the corresponding type
of character.
Usage
-----
@@ -27,7 +26,6 @@ Usage
will be replaced with the incremental value.
Example
-------
......
@@ -3,47 +3,35 @@ DIRB TODO LIST
++ High priority:
-> Output on a single line
- Add test_dir() function
++ Medium priority:
- Hotkey 'r' that shows the remaining words
- Option to show fine-tuning information
++ Low priority:
- Bundle libcurl in the installation package
- Save everything downloaded to disk
- Does not compile on AIX (look into a way to port it)
- Review alternatives to -t
- Bottleneck (elimina_dupwords()) => Sort the word list?
- Unify debug() function
- Save everything found to disk
- Bottleneck (elimina_dupwords()) => Sort the word list?
- Unify debug() function
++ Long term:
*** Modules
- Import wget
- Import httrack
- Import burp
- Import spider
*** Better directory detection
-> How to detect the /cgi-bin directory => 403 on apache
*** Smart mode (prunes the testing tree) --> genetic algorithms
- Detection of Apache easy find -> Options MultiViews => Content-Location: xxxx.php
- Detection of Apache Options MultiViews => Content-Location: xxxx.php
*** Distributed/multithreaded dirb
Test sites:
http://www.intersil.com/ - nec=200 / variable (.asp, .html)
http://www.invertia.com/ - nec=200
http://www.yonkis.com/ - nec=302
http://www.willard.k12.oh.us/webmail/ns-icons/
http://www.nwi.fws.gov/bd4/netscape/server4/ns-icons/
http://127.0.0.1:8000/
@@ -9,11 +9,10 @@ don't need to include the file extension in the search, the system will tell
you the right name through a "Content-Location" header.
2) Using extensions
-> Before starting your scan, navigate through the target URL and get the
most used file extensions. Include they in a extensions file (one extension by
most used file extensions. Include them in an extensions file (one extension per
line) and use it in your scan.
Example extensions file:
@@ -31,16 +30,15 @@ line:
-X ,,.asp,.txt,.html
3) Selective scanning
-> If you don't want to scan uninteresting directories like /images, /css,
-> If you don't want to scan uninteresting directories like /images, /css, /js,
etc., you can use the -R mode (interactive recursion) and DIRB will ask you
in which subdirectories you want to scan and in which you don't want.
for which subdirectories you want to scan and for which you don't.
4) Scanning IIS webservers
-> IIS webserver URLs are case insensitive, so you can use the -i mode to cut
down the number of tries.
\ No newline at end of file
down the number of tries.
@@ -100,6 +100,7 @@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
SET_MAKE = @SET_MAKE@
......
@@ -37,16 +37,15 @@ int main(int argc, char **argv) {
} else if (strcmp(argv[1], "-h")==0) {
charset=charset_hexa;
} else if (strcmp(argv[1], "-a")==0) {
charset=charset_alfanum;
} else if (strcmp(argv[1], "-C")==0) {
charset=charset_upperchar;
} else if (strcmp(argv[1], "-s")==0) {
charset=charset_sensitivealfanum;
charset=charset_alfanum;
} else if (strcmp(argv[1], "-C")==0) {
charset=charset_upperchar;
} else if (strcmp(argv[1], "-s")==0) {
charset=charset_sensitivealfanum;
} else {
usage(argv[0]);
usage(argv[0]);
}
gen("", 0, argv[2], charset, wildcard);
exit(0);
......
@@ -102,6 +102,7 @@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
SET_MAKE = @SET_MAKE@
......
@@ -9,8 +9,6 @@
#include "http_codes.h"
/*
* GET_NECS: Gets the NECs corresponding to each extension
*
@@ -106,7 +104,6 @@ struct result *calcula_nec(char *direccion) {
case 302:
if(options.finetunning==1) {
location_clean(nec1.location, rand_url1);
if(options.debuging>3) printf("[+++] calcula_nec() CLEANED1: %s\n", nec1.location);
}
break;
@@ -143,7 +140,7 @@ struct result *calcula_nec(char *direccion) {
if(nec1.body_size!=nec2.body_size) {
IMPRIME("(!) WARNING: NOT_FOUND[%s] page not stable, unable to determine the correct URLs {200}.\n", exts_current->word);
IMPRIME(" (Try using FineTunning: '-f')\n");
if(options.exitonwarn) exit(-3);
if(options.exitonwarn) { next_dir=1; }
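/* next_dir makes the scan move on to the next directory instead of aborting */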
} else {
mynec->body_size=nec1.body_size;
}
@@ -153,13 +150,12 @@ struct result *calcula_nec(char *direccion) {
case 302:
if(options.finetunning==1) {
location_clean(nec2.location, rand_url2);
if(options.debuging>3) printf("[+++] calcula_nec() CLEANED2: %s\n", nec2.location);
}
if(strncmp(nec1.location, nec2.location, STRING_SIZE-1)!=0) {
IMPRIME("(!) WARNING: NOT_FOUND[%s] not stable, unable to determine correct URLs {30X}.\n", exts_current->word);
IMPRIME(" (Try using FineTunning: '-f')\n");
if(options.exitonwarn) exit(-3);
if(options.exitonwarn) { next_dir=1; }
} else {
strncpy(mynec->location, nec1.location, STRING_SIZE-1);
}
@@ -174,8 +170,8 @@ struct result *calcula_nec(char *direccion) {
} else {
IMPRIME("(!) WARNING: NOT_FOUND[%s] not stable, unable to determine the correct URLs {%d,%d}.\n", exts_current->word, nec1.codigo_http, nec2.codigo_http);
IMPRIME(" (Try using Application Server Mode)\n");
if(options.exitonwarn) exit(-3);
IMPRIME(" (Server is returning random responses)\n");
if(options.exitonwarn) { next_dir=1; }
}
mynec->codigo_http=nec1.codigo_http;
......
@@ -70,18 +70,18 @@ struct words *crea_wordlist(char *ficheros) {
break;
}
// clean up the line
limpia_url(cbuffer);
if(strlen(cbuffer)) {
// '#' marks a comment line
if(cbuffer[0]=='#') cbuffer[0]='\0';
if(strlen(cbuffer)) {
strncpy(current->word, cbuffer, STRING_SIZE-1);
contador++;
current->siguiente=(struct words *)malloc(sizeof(struct words));
memset(current->siguiente, 0, sizeof(struct words));
current=current->siguiente;
}
}
@@ -245,5 +245,23 @@ }
}
/*
* COUNT_WORDS: Counts the words in a wordlist
*
*/
int count_words(struct words *list) {
int count=0;
struct words *ptr;
ptr=list;
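/* the list is terminated by an empty sentinel node, which is not counted */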
while(ptr->siguiente!=0) {
count++;
ptr=ptr->siguiente;
}
return count;
}
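/* A hedged usage sketch (names taken from the surrounding code):
 *
 *   struct words *palabras = crea_wordlist(options.mfile);
 *   printf("Loaded %d words\n", count_words(palabras));
 */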
@@ -8,7 +8,6 @@
#include "dirb.h"
/*
* MAIN: Core of the program
*
@@ -29,6 +28,7 @@ int main(int argc, char **argv) {
options.default_nec=404;
options.lasting_bar=1;
options.speed=0;
options.add_header=0;
encontradas=0;
descargadas=0;
@@ -55,13 +55,12 @@ int main(int argc, char **argv) {
if(strncmp(argv[1], "-resume", 7)==0) {
printf("(!) RESUMING...\n\n");
resume();
exit(0);
}
strncpy(options.url_inicial, argv[1], STRING_SIZE-1);
if(argc==2 || strncmp(argv[2], "-", 1)==0) {
strncpy(options.mfile, "wordlists/common.txt", STRING_SIZE-1);
strncpy(options.mfile, "/usr/share/dirb/wordlists/common.txt", STRING_SIZE-1);
optind+=1;
} else {
strncpy(options.mfile, argv[2], STRING_SIZE-1);
@@ -93,8 +92,13 @@ int main(int argc, char **argv) {
strncpy(options.vhost, optarg, STRING_SIZE-1);
break;
case 'H':
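/* repeated -H options are accumulated into header_string, separated by newlines */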
options.add_header=1;
strncpy(options.header_string, optarg, STRING_SIZE-1);
if(options.add_header) {
strcat(options.header_string, "\n");
strncat(options.header_string, optarg, STRING_SIZE-strlen(options.header_string)-2);
} else {
strncpy(options.header_string, optarg, STRING_SIZE-1);
}
options.add_header++;
break;
case 'i':
options.insensitive=1;
@@ -173,11 +177,10 @@ int main(int argc, char **argv) {
limpia_url(options.url_inicial);
if(options.lasting_bar) barra(options.url_inicial);
if(options.lasting_bar && !strchr(options.url_inicial, '?')) barra(options.url_inicial);
check_url(options.url_inicial);
limpia_url(options.mfile);
@@ -195,11 +198,6 @@ int main(int argc, char **argv) {
palabras=crea_wordlist(options.mfile);
// NEC check
get_necs(options.url_inicial);
// Open the mutations file and create the list
/*
@@ -259,6 +257,7 @@ void ayuda(void) {
printf("\n======================== HOTKEYS ========================\n");
printf(" 'n' -> Go to next directory.\n");
printf(" 'q' -> Stop scan. (Saving state for resume)\n");
printf(" 'r' -> Remaining scan stats.\n");
printf("\n======================== OPTIONS ========================\n");
printf(" -a <agent_string> : Specify your custom USER_AGENT.\n");
@@ -290,7 +289,7 @@ void ayuda(void) {
printf("\n======================== EXAMPLES =======================\n");
printf(" ./dirb http://url/directory/ (Simple Test)\n");
printf(" ./dirb http://url/ -X .html (Test files with '.html' extension)\n");
printf(" ./dirb http://url/ wordlists/vulns/apache.txt (Test with apache.txt wordlist)\n");
printf(" ./dirb http://url/ /usr/share/dirb/wordlists/vulns/apache.txt (Test with apache.txt wordlist)\n");
printf(" ./dirb https://secure_url/ (Simple Test with SSL)\n");
}
......
@@ -29,8 +29,8 @@ struct result {
// HTTP code
struct code {
int codenum;
char desc[STRING_SIZE];
int codenum;
char desc[STRING_SIZE];
};
......
@@ -9,41 +9,43 @@
/* Functions */
// dirb.c
void banner(void);
void ayuda(void);
// get_url.c
struct result get_url(char *resp_url);
size_t get_header(void *ptr, size_t size, size_t nmemb, void *stream);
size_t get_body(void *ptr, size_t size, size_t nmemb, void *stream);
// lanza_ataque.c
void lanza_ataque(char *url_base, struct words *wordlist);
// options.c
void get_options(void);
// calculanec.c
int get_necs(char *direccion);
struct result *calcula_nec(char *direccion);
// crea_wordlist.c
struct words *crea_wordlist(char *ficheros);
struct words *crea_wordlist_fich(char *fichero);
struct words *crea_extslist(char *lista);
FILE *abrir_file(char *file);
void check_url(char *url);
int count_words(struct words *list);
// utils.c
void limpia_url(char *limpia);
void barra(char *barr);
void guardadir(char *direccion);
void elimina_dupwords(struct words *puntero);
FILE *abrir_file(char *file);
int location_cmp(char *A, char *B);
void location_clean(char *cleaned, char *toelim);
void check_url(char *url);
int islistable(char *direccion);
char kbhit(void);
void cierre(void);
char *code2string(struct code *a, u_int v);
void init_exts(void);
void cierre(void);
char *uri_decode(char *uri);
int compare_str(char *A, char *B);
// resume.c
int mkpath(const char *s, mode_t mode);
void dump(void);
void resume(void);
......
@@ -58,7 +58,7 @@ retry:
if(options.use_vhost) {
strncpy(host_header, "Host: ", 6);
strncat(host_header, options.vhost, STRING_SIZE-1-6);
strncat(host_header, options.vhost, STRING_SIZE-1-strlen(host_header));
slist = curl_slist_append(slist, host_header);
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
}
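/* Side sketch: strncat's size argument bounds the characters appended, not
 * the total buffer size, so the remaining room must be derived from the
 * current string length, as the corrected line above does. A minimal
 * pattern (STRING_SIZE assumed from dirb.h):
 *
 *   char host_header[STRING_SIZE] = "Host: ";
 *   strncat(host_header, options.vhost,
 *           STRING_SIZE - strlen(host_header) - 1);
 *
 * The final -1 leaves room for the terminating NUL. */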
@@ -87,6 +87,7 @@ retry:
if(options.verify_ssl==0) {
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0);
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0);
curl_easy_setopt(curl, CURLOPT_SSLVERSION, 3);
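/* with libcurl's numeric enum, 3 corresponds to CURL_SSLVERSION_SSLv3 */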
}
@@ -123,18 +124,18 @@ retry:
/* return the structure */
if(options.debuging>1) {
printf("\nURL: %s\n", estructura.url);
printf("ESTADO: %d\n", estructura.estado);
printf("CODIGO_HTTP: %d\n", estructura.codigo_http);
printf("HEAD_SIZE: %d\n", estructura.head_size);
printf("HEAD_LINES: %d\n", estructura.head_lines);
printf("BODY_SIZE: %d\n", estructura.body_size);
printf("BODY_WORDS: %d\n", estructura.body_words);
printf("BODY_LINES: %d\n", estructura.body_lines);
printf("LOCATION: %s\n", estructura.location);
printf("SERVER: %s\n", estructura.server);
}
if(options.debuging>1) {
printf("\nURL: %s\n", estructura.url);
printf("ESTADO: %d\n", estructura.estado);
printf("CODIGO_HTTP: %d\n", estructura.codigo_http);
printf("HEAD_SIZE: %d\n", estructura.head_size);
printf("HEAD_LINES: %d\n", estructura.head_lines);
printf("BODY_SIZE: %d\n", estructura.body_size);
printf("BODY_WORDS: %d\n", estructura.body_words);
printf("BODY_LINES: %d\n", estructura.body_lines);
printf("LOCATION: %s\n", estructura.location);
printf("SERVER: %s\n", estructura.server);
}
return estructura;
@@ -218,7 +219,7 @@ size_t get_body(void *ptr, size_t size, size_t nmemb, void *stream) {
if(listable==-1) {
if(strstr(ptr, "Parent Directory")!=0 || strstr(ptr, "Up To ")!=0 || strstr(ptr, "Atrs A ")!=0 || strstr(ptr, "Al directorio primario")!=0) {
if(strstr(ptr, "Parent Directory")!=0 || strstr(ptr, "Up To ")!=0 || strstr(ptr, "Atrs A ")!=0 || strstr(ptr, "Al directorio pri")!=0 || strstr(ptr, "Directory Listing For")!=0) {
if(options.debuging>3) printf("[+++] get_body() Directory is listable\n");
listable=1;