如何用 Selenium 点击一个我看不到其元素的盒子?

1yjd4xko  于 2023-06-20  发布在  其他
关注(0)|答案(3)|浏览(101)

我想在fxblue技术分析中将frametime从1h(默认值)更改为5m,但我无法单击其弹出按钮。下面是我尝试过的代码:

# Question's script, fixed: `time` was used without being imported, and the
# `find_element_by_*` helpers were removed in Selenium 4 — use
# driver.find_element(By.XPATH, ...) instead.
import pandas as pd
import numpy as np
import csv
import os
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
driver = webdriver.Chrome(os.getcwd() + '/chromedriver')
url = "https://www.fxblue.com/market-data/technical-analysis/EURUSD"
driver.get(url)
time.sleep(5)  # crude wait for the page to render; WebDriverWait is more reliable
# .click() returns None, so the original assignment to `timestamp` was dropped.
driver.find_element(By.XPATH, '//*[@id="TimeframeContainer"]').click()

在这一点上,我可以看到弹出的时间框架,但我找不到改变时间框架的方法。

qxgroojn

qxgroojn1#

Timeframe弹出窗口中的元素位于iframe中。需要switch to frame来与其中包含的元素进行交互。

# The elements inside the Timeframe popup live in an <iframe>; we have to
# switch into that frame before interacting with them, and switch back out
# afterwards to reach the rest of the page.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver.get("https://www.fxblue.com/market-data/technical-analysis/EURUSD")

waiter = WebDriverWait(driver, 30)

# Open the timeframe popup.
waiter.until(EC.element_to_be_clickable((By.ID, "txtTimeframe"))).click()

# Enter the dialog's iframe.
waiter.until(EC.frame_to_be_available_and_switch_to_it(
    (By.XPATH, "//iframe[contains(@class,'DialogInnerIframe')]")))

# Pick the M5 timeframe.
waiter.until(EC.element_to_be_clickable(
    (By.XPATH, "//div[@class='TimeframeItem' and text()='M5']"))).click()

# Leave the iframe so other page elements are reachable again.
driver.switch_to.default_content()
qij5mzcb

qij5mzcb2#

单击该元素将打开一个对话框。
因此,您需要在该对话框中选择并单击所需的元素。
对话框在iframe中,您必须切换到iframe。
选择所需选项后,对话框关闭,您必须从iframe切换回默认内容。
此外,您应该使用显式等待而不是硬编码的暂停。
您的代码可能类似于以下内容

# Answer-2 snippet, fixed: `find_element_by_xpath` was removed in Selenium 4;
# use driver.find_element(By.XPATH, ...) instead.
import pandas as pd
import numpy as np
import csv
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome(os.getcwd() + '/chromedriver')
wait = WebDriverWait(driver, 20)

url = "https://www.fxblue.com/market-data/technical-analysis/EURUSD"
driver.get(url)
# Open the timeframe dialog.
wait.until(EC.visibility_of_element_located((By.ID, "TimeframeContainer"))).click()
# The dialog content sits inside an iframe — switch into it first.
wait.until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR,"iframe.DialogDragBar")))
# tf='300' is the 5-minute option (300 seconds).
driver.find_element(By.XPATH, "//div[@tf='300']").click()
# Switch back so the rest of the page is reachable again.
driver.switch_to.default_content()
gg0vcinb

gg0vcinb3#

感谢这个出色的脚本。我这边在做同样的事情时遇到了一些问题,这段代码帮我解决了它们。
我加入了日志记录器并做了一些小调整,现在分享出来,希望对社区有帮助。

"""
@author:  ""
@copyright:  ""
@credits:  [""]
@license:  ""
@version: ""
@Date: "" 
@maintainer: ""
@email: ""
@status: ""
@Function: "To scrape data from fx blue for technical analysis"
"""

import logging
from datetime import datetime
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
import os
from bs4 import BeautifulSoup
import time
import os.path
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Base URL of the fxblue technical-analysis pages; an instrument symbol
# (e.g. "EURUSD") is appended per request in WebScraper.parse_data.
url = "https://www.fxblue.com/market-data/technical-analysis/"

class WebScraper:
    """Scrape buy/sell TA scores from fxblue technical-analysis pages.

    For each instrument a browser is opened, the timeframe is switched via
    the popup dialog (whose options live inside an iframe), and the score
    shown on the page is recorded.
    """

    def __init__(self):
        self.url = url
        self.driver_path = os.getcwd() + '/chromedriver'  # Path to Chrome driver executable
        self.logger = self.setup_logger()
        self.now = datetime.now()
        self.dt_string = self.now.strftime("%d/%m/%Y %H:%M:%S")

    def setup_logger(self):
        """Configure and return a file-based logger for this scraper."""
        logger = logging.getLogger("WebScraper")
        logger.setLevel(logging.INFO)

        # FileHandler raises FileNotFoundError if the directory does not
        # exist, so create it up front.
        os.makedirs("logs", exist_ok=True)
        log_file = f"logs/scraper_logs_{datetime.now().strftime('%Y-%m-%d_%H-%M')}.log"

        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(logging.INFO)

        # Create a formatter for the log messages
        formatter = logging.Formatter('%(asctime)-15s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)d - %(message)s')
        file_handler.setFormatter(formatter)

        # Add the file handler to the logger
        logger.addHandler(file_handler)
        return logger

    def parse_data(self):
        """Scrape one TA score per (instrument, timeframe) pair.

        Returns:
            A list of dicts with keys 'instrument', 'buy', 'Timing' and
            'timestamp', or None when scraping failed.
        """
        # NOTE(review): the original also opened a throwaway browser on the
        # base URL here and never quit it (a leak); that has been removed —
        # each instrument gets its own driver below.
        dt_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
        results = []
        '''
        Full production lists, kept for reference:
        instrument = ["AUDCAD", "AUDJPY","AUDUSD","CADJPY","EURGBP","EURJPY","EURUSD",
                     "GBPUSD", "USDCAD","USDJPY","XAUUSD","WTI","BRENT",
                     "SP500", "NAS100","DJ30","ASX200","HK50","JP225","FTSE100",
                     "DAX", "BTCUSD","ETHUSD"]
        timeframe = ["M30", "H1", "H4", "D1", "W1"]
        '''
        instrument = ["AUDCAD", "AUDJPY"]
        timeframe = ["M30", "H1", "H4"]

        driver = None
        try:
            for ins in instrument:
                # One browser per instrument.  The positional chromedriver
                # path is deprecated in Selenium 4 — use Service instead.
                service = Service(self.driver_path)
                chrome_options = Options()
                # chrome_options.add_argument("--headless")
                driver = webdriver.Chrome(service=service, options=chrome_options)
                driver.get(self.url + str(ins))
                wait = WebDriverWait(driver, 30)
                time.sleep(5)  # give the charts time to render

                for tf in timeframe:
                    # Open the timeframe popup.
                    wait.until(EC.element_to_be_clickable((By.ID, "txtTimeframe"))).click()

                    # The popup's options live inside an iframe.
                    wait.until(EC.frame_to_be_available_and_switch_to_it(
                        (By.XPATH, "//iframe[contains(@class,'DialogInnerIframe')]")))

                    # Select the requested timeframe; the dialog then closes.
                    wait.until(EC.element_to_be_clickable(
                        (By.XPATH, "//div[@class='TimeframeItem' and text()='%s']" % tf))).click()

                    # TAScoreLabel is on the main document, so leave the
                    # iframe before reading it (the original skipped this).
                    driver.switch_to.default_content()

                    scores = driver.find_elements(By.XPATH, "//*[@id='TAScoreLabel']")
                    results.append({
                        'instrument': ins,
                        'buy': scores[0].text,
                        'Timing': tf,
                        'timestamp': dt_string,
                    })

                time.sleep(5)
                driver.quit()
                driver = None
            return results
        except Exception as e:
            self.logger.warning("Failed to scrape data from the URL.")
            print("Error occurred during data parsing:", str(e))
            return None
        finally:
            # Don't leak a browser when an iteration failed mid-way.
            # (The original had driver.close()/quit() after `return`,
            # which was unreachable.)
            if driver is not None:
                driver.quit()

    def execute(self):
        """Run a full scrape and persist the results to a timestamped CSV."""
        self.logger.info("Execution started.")
        scraped_data = self.parse_data()
        if scraped_data is not None:
            # Each dict in the list becomes one DataFrame row.
            df = pd.DataFrame(scraped_data)
            try:
                df.to_csv(
                    'fxblue_technical_analysis_%s.csv'
                    % datetime.now().strftime('%Y-%m-%d-%H-%M-%S'),
                    mode='a', index=False)
                self.logger.info("Data processing and save complete.")
            except Exception as e:
                # The original passed `e` as a %-format argument with no
                # placeholder in the message; use lazy %-formatting.
                self.logger.warning("Failed to save: %s", e)
        else:
            self.logger.warning("Failed to save")
        self.logger.info("Execution finished.")
        
        

# Guard the entry point so importing this module does not launch a scrape.
if __name__ == "__main__":
    # Create an instance of the WebScraper class and run the scraping process.
    scraper = WebScraper()
    scraper.execute()

相关问题