I'm trying to scrape company profile URLs from a LinkedIn search, but I keep getting "Not found". Everything else in my code works well. Here it is:
import csv
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
# Read the keywords from a file
with open("keywords.txt", "r") as file:
    keywords = file.read().splitlines()
# Set up the Edge WebDriver
edgedriver_path = '/path/to/edgedriver'
options = webdriver.EdgeOptions()
options.add_argument("--start-maximized")
driver = webdriver.Edge(options=options, executable_path=edgedriver_path)
driver.implicitly_wait(10)
driver.get('https://www.linkedin.com/login')
email_input = driver.find_element(By.ID, 'username')
password_input = driver.find_element(By.ID, 'password')
email_input.send_keys('*************')
password_input.send_keys('*************')
password_input.send_keys(Keys.ENTER)
time.sleep(10)
# Create a new CSV file and write the headers
with open("results.csv", "w", newline="") as file:
    writer = csv.writer(file)
    writer.writerow(["Keyword", "Company URL"])
    # Perform the search for each keyword and write the result to the CSV file
    for keyword in keywords:
        URL = 'https://www.linkedin.com/search/results/companies/?keywords=' + keyword
        driver.get(url=URL)
        driver.implicitly_wait(5)
        soup = BeautifulSoup(driver.page_source, "html.parser")
        a_href = soup.find_all('a', {'class': 'app-aware-link '})
        if a_href:
            link = a_href[0].get('href')
            writer.writerow([keyword, link])
            print(f"Keyword: {keyword}, linkedin: {link}")
        else:
            writer.writerow([keyword, "Not found"])
            print(f"Keyword: {keyword}, linkedin: Not found")
The problem is in the "# Perform the search for each keyword and write the result to the CSV file" part: I don't know how to use find_all with the a tag, its class, and the href attribute to get the URL of the first result. For example, if I search for meta, the result should be https://www.linkedin.com/company/meta/
I think the main issue here might be that driver.implicitly_wait(5) inside the for keyword in keywords: block isn't enough, and you need to use some kind of explicit wait instead. For example, I tried with simply time.sleep(5), as in the sketch below, and that printed the first company URL for each keyword.
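Something along these lines (a minimal sketch; the CSS selector span.entity-result__title-text > a.app-aware-link is an assumption about LinkedIn's markup at the time of writing, so it may need updating when their class names change):

import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

driver = webdriver.Chrome()
driver.get('https://www.linkedin.com/login')
driver.find_element(By.ID, 'username').send_keys('*************')
driver.find_element(By.ID, 'password').send_keys('*************' + Keys.ENTER)
time.sleep(10)  # give the login redirect time to finish

keywords = ["meta"]  # hypothetical sample keyword for the demo
for keyword in keywords:
    driver.get('https://www.linkedin.com/search/results/companies/?keywords=' + keyword)
    time.sleep(5)  # wait for the result cards to render before grabbing the page source
    soup = BeautifulSoup(driver.page_source, "html.parser")
    # .select() takes a CSS selector; the first match is the first company result
    links = soup.select('span.entity-result__title-text > a.app-aware-link')
    if links:
        print(f"Keyword: {keyword}, linkedin: {links[0].get('href')}")
    else:
        print(f"Keyword: {keyword}, linkedin: Not found")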
Note that I removed the csv parts to focus on the search issue, and I used Chrome instead of Edge. Also, I changed how a_href is searched for, using .select with a CSS selector, because .find_all('a', {'class': 'app-aware-link '}) will always give you href="https://www.linkedin.com/feed/?nis=true" instead of the first search result.
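If you'd rather avoid fixed sleeps entirely, a proper explicit wait does the same job more robustly. A sketch, assuming the same (hypothetical) selector:

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Block for up to 10 seconds until the first result link is present,
# then hand the rendered page to BeautifulSoup as before
WebDriverWait(driver, 10).until(EC.presence_of_element_located(
    (By.CSS_SELECTOR, 'span.entity-result__title-text > a.app-aware-link')))
soup = BeautifulSoup(driver.page_source, "html.parser")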